Nov 24 17:48:29 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 24 17:48:29 crc restorecon[4697]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:48:29 crc restorecon[4697]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc 
restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:48:29 crc 
restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:29 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:29 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 
17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:48:30 crc 
restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:48:30 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:48:30 crc restorecon[4697]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 24 17:48:31 crc kubenswrapper[4702]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.279831 4702 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298727 4702 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298770 4702 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298775 4702 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298778 4702 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298783 4702 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298790 4702 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298816 4702 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298821 4702 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298825 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298829 4702 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298835 4702 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298839 4702 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298845 4702 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298849 4702 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298854 4702 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298859 4702 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298863 4702 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298868 4702 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298872 4702 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298878 4702 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298882 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298886 4702 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298890 4702 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298899 4702 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298903 4702 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298907 4702 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298911 4702 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298915 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298918 4702 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298922 4702 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298926 4702 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298929 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298933 4702 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298938 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298942 4702 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298947 4702 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298952 4702 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298956 4702 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298960 4702 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298965 4702 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298969 4702 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298974 4702 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298978 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298982 4702 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298986 4702 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298991 4702 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.298995 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299000 4702 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299004 4702 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299008 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299012 4702 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299016 4702 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299020 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299024 4702 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299028 4702 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299031 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299035 4702 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299039 4702 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299042 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299046 4702 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299051 4702 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299056 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299060 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299064 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299068 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299071 4702 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299075 4702 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299078 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299082 4702 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299085 4702 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.299089 4702 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299196 4702 flags.go:64] FLAG: --address="0.0.0.0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299208 4702 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299220 4702 flags.go:64] FLAG: --anonymous-auth="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299226 4702 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299232 4702 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299236 4702 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299242 4702 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299248 4702 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299252 4702 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299256 4702 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299262 4702 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299300 4702 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299305 4702 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299309 4702 flags.go:64] FLAG: --cgroup-root="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299314 4702 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299318 4702 flags.go:64] FLAG: --client-ca-file="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299322 4702 flags.go:64] FLAG: --cloud-config="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299326 4702 flags.go:64] FLAG: --cloud-provider="" Nov 24 
17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299330 4702 flags.go:64] FLAG: --cluster-dns="[]" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299341 4702 flags.go:64] FLAG: --cluster-domain="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299345 4702 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299350 4702 flags.go:64] FLAG: --config-dir="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299355 4702 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299361 4702 flags.go:64] FLAG: --container-log-max-files="5" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299367 4702 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299373 4702 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299378 4702 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299382 4702 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299387 4702 flags.go:64] FLAG: --contention-profiling="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299392 4702 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299396 4702 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299401 4702 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299405 4702 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299412 4702 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299416 4702 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299421 4702 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299425 4702 flags.go:64] FLAG: --enable-load-reader="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299429 4702 flags.go:64] FLAG: --enable-server="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299433 4702 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299443 4702 flags.go:64] FLAG: --event-burst="100" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299448 4702 flags.go:64] FLAG: --event-qps="50" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299453 4702 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299459 4702 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299463 4702 flags.go:64] FLAG: --eviction-hard="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299469 4702 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299474 4702 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299478 4702 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299489 4702 flags.go:64] FLAG: 
--eviction-soft="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299494 4702 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299499 4702 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299503 4702 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299507 4702 flags.go:64] FLAG: --experimental-mounter-path="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299512 4702 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299516 4702 flags.go:64] FLAG: --fail-swap-on="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299520 4702 flags.go:64] FLAG: --feature-gates="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299525 4702 flags.go:64] FLAG: --file-check-frequency="20s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299530 4702 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299534 4702 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299539 4702 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299543 4702 flags.go:64] FLAG: --healthz-port="10248" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299548 4702 flags.go:64] FLAG: --help="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299552 4702 flags.go:64] FLAG: --hostname-override="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299557 4702 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299561 4702 flags.go:64] FLAG: --http-check-frequency="20s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299565 4702 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299570 4702 flags.go:64] FLAG: --image-credential-provider-config="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299573 4702 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299578 4702 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299582 4702 flags.go:64] FLAG: --image-service-endpoint="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299586 4702 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299591 4702 flags.go:64] FLAG: --kube-api-burst="100" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299596 4702 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299602 4702 flags.go:64] FLAG: --kube-api-qps="50" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299608 4702 flags.go:64] FLAG: --kube-reserved="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299613 4702 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299620 4702 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299626 4702 flags.go:64] FLAG: --kubelet-cgroups="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299632 4702 flags.go:64] FLAG: 
--local-storage-capacity-isolation="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299637 4702 flags.go:64] FLAG: --lock-file="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299642 4702 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299648 4702 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299653 4702 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299668 4702 flags.go:64] FLAG: --log-json-split-stream="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299680 4702 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299685 4702 flags.go:64] FLAG: --log-text-split-stream="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299690 4702 flags.go:64] FLAG: --logging-format="text" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299696 4702 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299701 4702 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299706 4702 flags.go:64] FLAG: --manifest-url="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299710 4702 flags.go:64] FLAG: --manifest-url-header="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299717 4702 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299721 4702 flags.go:64] FLAG: --max-open-files="1000000" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299727 4702 flags.go:64] FLAG: --max-pods="110" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299731 4702 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299735 4702 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299739 4702 flags.go:64] FLAG: --memory-manager-policy="None" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299744 4702 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299748 4702 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299752 4702 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299756 4702 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299768 4702 flags.go:64] FLAG: --node-status-max-images="50" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299772 4702 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299777 4702 flags.go:64] FLAG: --oom-score-adj="-999" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299781 4702 flags.go:64] FLAG: --pod-cidr="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299786 4702 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299811 4702 flags.go:64] FLAG: --pod-manifest-path="" Nov 24 17:48:31 crc 
kubenswrapper[4702]: I1124 17:48:31.299816 4702 flags.go:64] FLAG: --pod-max-pids="-1" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299821 4702 flags.go:64] FLAG: --pods-per-core="0" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299825 4702 flags.go:64] FLAG: --port="10250" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299830 4702 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299834 4702 flags.go:64] FLAG: --provider-id="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299838 4702 flags.go:64] FLAG: --qos-reserved="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299843 4702 flags.go:64] FLAG: --read-only-port="10255" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299847 4702 flags.go:64] FLAG: --register-node="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299851 4702 flags.go:64] FLAG: --register-schedulable="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299855 4702 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299864 4702 flags.go:64] FLAG: --registry-burst="10" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299868 4702 flags.go:64] FLAG: --registry-qps="5" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299872 4702 flags.go:64] FLAG: --reserved-cpus="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299883 4702 flags.go:64] FLAG: --reserved-memory="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299889 4702 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299893 4702 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299897 4702 flags.go:64] FLAG: --rotate-certificates="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299901 4702 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299905 4702 flags.go:64] FLAG: --runonce="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299909 4702 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299914 4702 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299919 4702 flags.go:64] FLAG: --seccomp-default="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299923 4702 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299927 4702 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299932 4702 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299936 4702 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299941 4702 flags.go:64] FLAG: --storage-driver-password="root" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299945 4702 flags.go:64] FLAG: --storage-driver-secure="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299949 4702 flags.go:64] FLAG: --storage-driver-table="stats" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299953 4702 flags.go:64] FLAG: --storage-driver-user="root" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299957 4702 flags.go:64] FLAG: 
--streaming-connection-idle-timeout="4h0m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299967 4702 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299971 4702 flags.go:64] FLAG: --system-cgroups="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299977 4702 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299984 4702 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299988 4702 flags.go:64] FLAG: --tls-cert-file="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.299993 4702 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300001 4702 flags.go:64] FLAG: --tls-min-version="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300005 4702 flags.go:64] FLAG: --tls-private-key-file="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300010 4702 flags.go:64] FLAG: --topology-manager-policy="none" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300014 4702 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300018 4702 flags.go:64] FLAG: --topology-manager-scope="container" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300023 4702 flags.go:64] FLAG: --v="2" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300031 4702 flags.go:64] FLAG: --version="false" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300037 4702 flags.go:64] FLAG: --vmodule="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300043 4702 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300047 4702 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300180 4702 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300191 4702 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300198 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300204 4702 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300209 4702 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300215 4702 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300219 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300223 4702 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300227 4702 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300231 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300235 4702 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300238 4702 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 
24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300243 4702 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300248 4702 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300252 4702 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300256 4702 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300261 4702 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300265 4702 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300270 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300274 4702 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300278 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300282 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300285 4702 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300289 4702 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300293 4702 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300297 4702 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300301 4702 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300305 4702 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300308 4702 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300312 4702 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300316 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300320 4702 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300323 4702 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300327 4702 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300331 4702 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300336 4702 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300340 4702 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300344 4702 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300348 4702 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300352 4702 feature_gate.go:330] unrecognized feature gate: Example Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300356 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300360 4702 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300364 4702 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300367 4702 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300371 4702 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300375 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300379 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300383 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300386 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300390 4702 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300394 4702 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300398 4702 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300403 4702 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300407 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300411 4702 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300416 4702 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300420 4702 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300424 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300428 4702 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300432 4702 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300435 4702 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300439 4702 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300442 4702 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300446 4702 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300450 4702 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300453 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300456 4702 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300460 4702 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300463 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300467 4702 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.300471 4702 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.300484 4702 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.314479 4702 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.314509 4702 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314582 4702 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314590 4702 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314594 4702 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314598 4702 feature_gate.go:330] unrecognized feature gate: 
ClusterMonitoringConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314601 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314605 4702 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314609 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314612 4702 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314616 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314619 4702 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314623 4702 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314626 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314629 4702 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314633 4702 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314637 4702 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314640 4702 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314644 4702 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314647 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314652 4702 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314658 4702 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314663 4702 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314668 4702 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314673 4702 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314678 4702 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314682 4702 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314686 4702 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314690 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314695 4702 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314699 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314703 4702 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314707 4702 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314711 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314715 4702 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314719 4702 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314724 4702 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314727 4702 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314731 4702 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314734 4702 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314738 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314741 4702 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314745 4702 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314749 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314752 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314756 4702 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314759 4702 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314763 4702 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314766 4702 feature_gate.go:330] unrecognized feature gate: Example Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314770 4702 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314773 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314777 4702 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314780 4702 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314784 4702 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314787 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314792 4702 
feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314809 4702 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314812 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314816 4702 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314820 4702 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314824 4702 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314827 4702 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314831 4702 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314834 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314837 4702 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314841 4702 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314845 4702 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314848 4702 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314852 4702 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314855 4702 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314859 4702 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314862 4702 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314867 4702 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.314874 4702 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314984 4702 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314991 4702 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314995 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.314999 4702 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315002 4702 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315006 4702 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315010 4702 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315014 4702 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315017 4702 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315020 4702 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315024 4702 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315028 4702 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315031 4702 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315035 4702 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315040 4702 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315044 4702 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315047 4702 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315051 4702 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315056 4702 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315061 4702 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315065 4702 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315068 4702 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315072 4702 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315075 4702 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315079 4702 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315082 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315086 4702 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315090 4702 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315095 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315099 4702 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315103 4702 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315107 4702 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315112 4702 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315116 4702 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315120 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315124 4702 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315128 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315132 4702 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315136 4702 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315140 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315144 4702 feature_gate.go:330] unrecognized feature gate: Example Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315147 4702 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315151 4702 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315154 4702 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315158 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315162 4702 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315166 4702 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315169 4702 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315173 4702 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315176 4702 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315180 4702 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315183 4702 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315187 4702 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315191 4702 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315195 4702 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315198 4702 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315202 4702 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315206 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315210 4702 
feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315214 4702 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315217 4702 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315221 4702 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315224 4702 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315228 4702 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315231 4702 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315235 4702 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315238 4702 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315241 4702 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315245 4702 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315248 4702 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.315252 4702 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.315258 4702 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.315409 4702 server.go:940] "Client rotation is on, will bootstrap in background" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.321576 4702 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.322775 4702 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
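Two details in this stretch of the startup are easy to misread. The long runs of "unrecognized feature gate" warnings are OpenShift-specific gate names that this kubelet's gate parser logs and then skips; only the gates in the final feature_gate.go:386 summary map are ones the embedded Kubernetes v1.31.5 code actually acts on. And although the earlier FLAG dump printed --rotate-certificates="false", that line shows the flag's default rather than an explicit setting; "Client rotation is on" indicates rotation was enabled elsewhere, presumably by the config file. A sketch of the corresponding KubeletConfiguration fields, with rotateCertificates inferred from the log rather than read from the node, and the featureGates values mirroring the summary map:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # inferred from "Client rotation is on, will bootstrap in background"
    rotateCertificates: true
    # a subset of the recognized gates from the feature_gate.go:386 summary
    featureGates:
      CloudDualStackNodeIPs: true
      DisableKubeletCloudCredentialProviders: true
      KMSv1: true
      ValidatingAdmissionPolicy: true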
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.326340 4702 server.go:997] "Starting client certificate rotation"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.326364 4702 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.328327 4702 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-04 13:52:03.514414077 +0000 UTC
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.328438 4702 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 980h3m32.185979375s for next certificate rotation
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.402391 4702 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.407737 4702 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.432744 4702 log.go:25] "Validated CRI v1 runtime API"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.530700 4702 log.go:25] "Validated CRI v1 image API"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.533753 4702 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.543125 4702 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-24-17-44-08-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.543154 4702 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.558265 4702 manager.go:217] Machine: {Timestamp:2025-11-24 17:48:31.554015527 +0000 UTC m=+0.794756711 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:cad111b8-871f-4060-8514-4607c81be6e2 BootID:2d33a4b2-a511-45b9-8f09-968167e4e730 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:8e:78:ee Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:8e:78:ee Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:7d:34:93 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:51:21:14 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:9b:88:1b Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:92:ce:d7 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:e6:04:5b:c6:5a:2b Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:92:9b:88:ea:1c:b9 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.558467 4702 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.558589 4702 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.561087 4702 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.561424 4702 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.561485 4702 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.561793 4702 topology_manager.go:138] "Creating topology manager with none policy"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.561848 4702 container_manager_linux.go:303] "Creating device plugin manager"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.562662 4702 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.562716 4702 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.562983 4702 state_mem.go:36] "Initialized new in-memory state store"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.563090 4702 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.568729 4702 kubelet.go:418] "Attempting to sync node with API server"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.568762 4702 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.568785 4702 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.568834 4702 kubelet.go:324] "Adding apiserver pod source"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.568851 4702 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.573495 4702 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.574986 4702 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.577215 4702 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.578735 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.578886 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.579036 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.579293 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579879 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579917 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579926 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579933 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579951 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579958 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579985 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.579998 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.580006 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.580014 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.580043 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.580058 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.581519 4702 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.582137 4702 server.go:1280] "Started kubelet"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.586207 4702 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.586251 4702 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.586900 4702 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 24 17:48:31 crc systemd[1]: Started Kubernetes Kubelet.
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.587815 4702 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591480 4702 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591578 4702 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591786 4702 server.go:460] "Adding debug handlers to kubelet server"
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.591818 4702 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591809 4702 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 16:00:01.068333919 +0000 UTC
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591866 4702 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 238h11m29.476470911s for next certificate rotation
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591906 4702 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.591918 4702 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.593619 4702 factory.go:55] Registering systemd factory
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.593653 4702 factory.go:221] Registration of the systemd container factory successfully
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.594337 4702 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.595327 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.595409 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.595843 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="200ms"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.596337 4702 factory.go:153] Registering CRI-O factory
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.596367 4702 factory.go:221] Registration of the crio container factory successfully
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.596651 4702 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.596683 4702 factory.go:103] Registering Raw factory
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.596703 4702 manager.go:1196] Started watching for new ooms in manager
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.598038 4702 manager.go:319] Starting recovery of all containers
Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.597965 4702 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.65:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b02938053c9cf default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-24 17:48:31.582104015 +0000 UTC m=+0.822845179,LastTimestamp:2025-11-24 17:48:31.582104015 +0000 UTC m=+0.822845179,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608420 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608531 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608556 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608575 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608596 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608618 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608637 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608651 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608690 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608704 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608760 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608851 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608889 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608906 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608919 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608935 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608948 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.608989 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609005 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609042 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609058 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609070 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609089 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609104 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609117 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609131 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609147 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609197 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609214 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609234 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609253 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609267 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609760 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609787 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609844 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609876 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609891 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609906 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609920 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609934 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609952 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609966 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.609983 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610040 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610119 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610137 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610150 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610164 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610178 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610191 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610204 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610277 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610341 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610357 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610374 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610391 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610452 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610467 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610482 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610519 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610533 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610548 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610561 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610574 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610589 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610607 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610621 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610653 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610667 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610681 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610694 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610708 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610723 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610735 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610751 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610834 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610875 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610890 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610905 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610920 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610935 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610957 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.610971 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611008 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611026 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611041 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611057 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611071 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611085 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611100 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611114 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611131 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611148 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611183 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611197 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611214 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611235 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.611255 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.616978 4702 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617046 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617075 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617089 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617101 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617117 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617132 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617162 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617182 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617202 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617224 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617325 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617346 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617390 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617411 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617424 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617440 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617454 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617470 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617607 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617669 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617727 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617751 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617764 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617811 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617826 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617855 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617877 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617889 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617906 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617923 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617939 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617958 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617969 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617986 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.617997 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618008 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618100 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618114 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618129 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618143 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618155 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08"
volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618171 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618184 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618198 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618214 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618875 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618896 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618911 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618922 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618933 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618949 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618961 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618977 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.618992 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619002 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619015 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619025 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619039 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619049 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619058 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619070 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619080 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619091 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619101 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619110 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619126 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619136 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619149 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619165 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619177 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619959 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.619991 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620008 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620024 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620039 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620054 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620067 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620084 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620099 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620114 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620130 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620145 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620161 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620175 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620749 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620789 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620820 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620834 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620847 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620878 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620890 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620905 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620918 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620933 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620946 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620959 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620975 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.620989 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621005 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621016 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621030 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621043 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621085 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621097 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621111 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621125 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621142 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621157 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621173 4702 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621186 4702 reconstruct.go:97] "Volume reconstruction finished" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.621197 4702 reconciler.go:26] "Reconciler: start to sync state" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.622927 4702 manager.go:324] Recovery completed Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.632410 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.634015 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.634070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.634082 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.635014 4702 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.635039 4702 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.635068 4702 state_mem.go:36] "Initialized new in-memory state store" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.644787 4702 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.646767 4702 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.646834 4702 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.646867 4702 kubelet.go:2335] "Starting kubelet main sync loop" Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.646927 4702 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.647334 4702 policy_none.go:49] "None policy: Start" Nov 24 17:48:31 crc kubenswrapper[4702]: W1124 17:48:31.647604 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.647679 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.648049 4702 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.648082 4702 state_mem.go:35] "Initializing new in-memory state store" Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.692868 4702 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.694202 4702 manager.go:334] "Starting Device Plugin manager" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.694271 4702 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.694288 4702 server.go:79] "Starting device plugin registration server" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.694873 4702 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.694892 4702 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.695329 4702 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.695459 4702 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.695471 4702 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.703071 4702 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.748026 4702 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Nov 24 17:48:31 crc kubenswrapper[4702]: 
I1124 17:48:31.748190 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.749661 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.749707 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.749716 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.749910 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750261 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750338 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750820 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750853 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750871 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.750977 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.751108 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.751138 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.751353 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.751386 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.751396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.753169 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.753206 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.753216 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756403 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756448 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756589 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756698 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.756731 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758033 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758072 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758080 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758226 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758255 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758267 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758378 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758454 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.758495 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759851 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759882 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759897 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759904 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759918 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.759925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.760090 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.760111 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.760663 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.760692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.760712 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.795154 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.796463 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="400ms" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.796617 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.796667 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.796680 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.796707 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:31 crc kubenswrapper[4702]: E1124 17:48:31.797200 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823448 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823483 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823508 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823525 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") 
pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823567 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823665 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823695 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823719 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823756 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823788 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823825 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823851 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823876 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823900 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.823923 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925512 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925586 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925622 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925650 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925680 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925698 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925737 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925708 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925774 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925767 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925825 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925833 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925791 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925818 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925815 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925786 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925910 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925933 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod 
\"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925955 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.925977 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926033 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926071 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926096 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926122 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926125 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926136 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926156 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926179 4702 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926136 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.926292 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.998054 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.999457 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.999531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.999549 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:31 crc kubenswrapper[4702]: I1124 17:48:31.999589 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.000337 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.072546 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.091290 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.109052 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.114906 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-22fa280ae862a66c105c2e6667456052feb99da9ccaad30caf69e9e51407c2e9 WatchSource:0}: Error finding container 22fa280ae862a66c105c2e6667456052feb99da9ccaad30caf69e9e51407c2e9: Status 404 returned error can't find the container with id 22fa280ae862a66c105c2e6667456052feb99da9ccaad30caf69e9e51407c2e9 Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.122224 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.126483 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-854c4fbdef9cf14a9e3d2de817755f7c24a1f0f41df20cb6060ad537ac65d6aa WatchSource:0}: Error finding container 854c4fbdef9cf14a9e3d2de817755f7c24a1f0f41df20cb6060ad537ac65d6aa: Status 404 returned error can't find the container with id 854c4fbdef9cf14a9e3d2de817755f7c24a1f0f41df20cb6060ad537ac65d6aa Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.133356 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-4e1b289826613c4c58628d06687e688e27df7cfff8b53bf7cdfe0f6bfda37a8f WatchSource:0}: Error finding container 4e1b289826613c4c58628d06687e688e27df7cfff8b53bf7cdfe0f6bfda37a8f: Status 404 returned error can't find the container with id 4e1b289826613c4c58628d06687e688e27df7cfff8b53bf7cdfe0f6bfda37a8f Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.134334 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-a55cad2e3f25d7dc8621d8432101f2b27a5ed1d1f2e1441783960559e47f1436 WatchSource:0}: Error finding container a55cad2e3f25d7dc8621d8432101f2b27a5ed1d1f2e1441783960559e47f1436: Status 404 returned error can't find the container with id a55cad2e3f25d7dc8621d8432101f2b27a5ed1d1f2e1441783960559e47f1436 Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.140996 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.158207 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-cda593db0db3d3c3cf2d905ab78e83de3fe1ca0821b132d5027811f95ee2383c WatchSource:0}: Error finding container cda593db0db3d3c3cf2d905ab78e83de3fe1ca0821b132d5027811f95ee2383c: Status 404 returned error can't find the container with id cda593db0db3d3c3cf2d905ab78e83de3fe1ca0821b132d5027811f95ee2383c Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.198046 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="800ms" Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.389977 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.390163 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.401441 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.403014 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.403067 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.403077 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.403107 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.403655 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.588624 4702 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.625538 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.625701 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list 
*v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.651109 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"22fa280ae862a66c105c2e6667456052feb99da9ccaad30caf69e9e51407c2e9"} Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.652188 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"cda593db0db3d3c3cf2d905ab78e83de3fe1ca0821b132d5027811f95ee2383c"} Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.653119 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a55cad2e3f25d7dc8621d8432101f2b27a5ed1d1f2e1441783960559e47f1436"} Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.653981 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4e1b289826613c4c58628d06687e688e27df7cfff8b53bf7cdfe0f6bfda37a8f"} Nov 24 17:48:32 crc kubenswrapper[4702]: I1124 17:48:32.654706 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"854c4fbdef9cf14a9e3d2de817755f7c24a1f0f41df20cb6060ad537ac65d6aa"} Nov 24 17:48:32 crc kubenswrapper[4702]: W1124 17:48:32.710083 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:32 crc kubenswrapper[4702]: E1124 17:48:32.710160 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:33 crc kubenswrapper[4702]: E1124 17:48:33.000277 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="1.6s" Nov 24 17:48:33 crc kubenswrapper[4702]: W1124 17:48:33.113906 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:33 crc kubenswrapper[4702]: E1124 17:48:33.114025 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: 
connect: connection refused" logger="UnhandledError" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.204306 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.207200 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.207269 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.207285 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.207317 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:33 crc kubenswrapper[4702]: E1124 17:48:33.207905 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.587994 4702 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.660343 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.660391 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.660404 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.660413 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.660511 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661559 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661594 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661727 4702 generic.go:334] "Generic 
(PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4" exitCode=0 Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661793 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.661892 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.662488 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.662529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.662542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.662990 4702 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="14c7d2b0d048dcef6aef3e1c8448db55fcbbb8198d229e344c519abbb353cc43" exitCode=0 Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.663100 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.663284 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"14c7d2b0d048dcef6aef3e1c8448db55fcbbb8198d229e344c519abbb353cc43"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.663893 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.663922 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.663933 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.664047 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665016 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665053 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665068 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665623 4702 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a" exitCode=0 Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665676 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.665709 4702 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667077 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667105 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667114 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667876 4702 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f" exitCode=0 Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667910 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f"} Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.667946 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.668655 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.668680 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:33 crc kubenswrapper[4702]: I1124 17:48:33.668688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.588239 4702 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:34 crc kubenswrapper[4702]: E1124 17:48:34.602333 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="3.2s" Nov 24 17:48:34 crc kubenswrapper[4702]: W1124 17:48:34.623083 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:34 crc kubenswrapper[4702]: E1124 17:48:34.623164 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.675342 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.675408 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.675422 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.675434 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.677850 4702 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="1842e07af56e2cc374804681a877a9267cf398304dcb83d033dfd8c1fd7e264c" exitCode=0 Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.678033 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.678032 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"1842e07af56e2cc374804681a877a9267cf398304dcb83d033dfd8c1fd7e264c"} Nov 24 17:48:34 crc kubenswrapper[4702]: W1124 17:48:34.678878 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:34 crc kubenswrapper[4702]: E1124 17:48:34.678955 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.678979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.679004 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.679018 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.681102 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"c6529a81232f3f57051a0982e935a142238a75bc8f2e555d58c7f1422e48c8e6"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.681143 4702 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.682819 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.682847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.682860 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.689228 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.690054 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.690420 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.690467 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.690479 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d"} Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.691272 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.691305 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.691318 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.692041 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.692059 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.692082 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.808198 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.809714 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.809773 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.809791 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.809866 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:34 crc kubenswrapper[4702]: E1124 17:48:34.810593 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 24 17:48:34 crc kubenswrapper[4702]: I1124 17:48:34.962457 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:35 crc kubenswrapper[4702]: W1124 17:48:35.133250 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 24 17:48:35 crc kubenswrapper[4702]: E1124 17:48:35.133319 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.693998 4702 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="202f80ae5d5e320a406d1e499e827fe192833f5da8ad6ffc604e02108015a6dd" exitCode=0 Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.694116 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"202f80ae5d5e320a406d1e499e827fe192833f5da8ad6ffc604e02108015a6dd"} Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.694218 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.695649 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.695686 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.695699 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.698176 4702 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.698186 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.698205 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.698717 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc"} Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.698779 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:35 crc 
kubenswrapper[4702]: I1124 17:48:35.698874 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.699667 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.699688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.699697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700613 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700641 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700648 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700620 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700791 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700817 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:35 crc kubenswrapper[4702]: I1124 17:48:35.700874 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704593 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c4b4116d47ec2e07e9bd8eb1471b50f168401dfdf2f87ca864989fe1e8bf50cc"} Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704634 4702 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704636 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"982c97dfd930219f3ebcd749e93e4d5e3c5fdb3b2e20934e440b06e3432c9402"} Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704650 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"dad3b669bbb13eece9f9fbbbc69ae5d556428a99548cc0fe06847fe950668039"} Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704659 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"040b74967032cfda512e8375ba39a4250924329f026059b58f61e0e115d6dacc"} Nov 24 17:48:36 
crc kubenswrapper[4702]: I1124 17:48:36.704667 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a0d705a98f8bd06bd2ed4ecbf81d06d5b4713ce6a6ec9b75ec16a87c53e2be68"} Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704667 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.704682 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705772 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705807 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705816 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705820 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705841 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:36 crc kubenswrapper[4702]: I1124 17:48:36.705852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.092591 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.707364 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.708562 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.708595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.708605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.962697 4702 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 17:48:37 crc kubenswrapper[4702]: I1124 17:48:37.962783 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.011095 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.015751 4702 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.015844 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.015864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.015904 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.563301 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.711664 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.712505 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.712534 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:38 crc kubenswrapper[4702]: I1124 17:48:38.712543 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.052933 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.053190 4702 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.053243 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.055284 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.055371 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.055391 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.498292 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.714955 4702 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.714984 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.715009 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716202 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716261 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716279 4702 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716273 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:39 crc kubenswrapper[4702]: I1124 17:48:39.716539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:40 crc kubenswrapper[4702]: I1124 17:48:40.279670 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:40 crc kubenswrapper[4702]: I1124 17:48:40.279900 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:40 crc kubenswrapper[4702]: I1124 17:48:40.281186 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:40 crc kubenswrapper[4702]: I1124 17:48:40.281276 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:40 crc kubenswrapper[4702]: I1124 17:48:40.281296 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.588125 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.588365 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.590347 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.590391 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.590404 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:41 crc kubenswrapper[4702]: E1124 17:48:41.703214 4702 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.769771 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.770019 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.771619 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.771648 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:41 crc kubenswrapper[4702]: I1124 17:48:41.771657 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.822502 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.822675 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.823926 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.823956 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.823969 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.827311 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:42 crc kubenswrapper[4702]: I1124 17:48:42.997500 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:43 crc kubenswrapper[4702]: I1124 17:48:43.001442 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:43 crc kubenswrapper[4702]: I1124 17:48:43.723598 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:43 crc kubenswrapper[4702]: I1124 17:48:43.724616 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:43 crc kubenswrapper[4702]: I1124 17:48:43.724683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:43 crc kubenswrapper[4702]: I1124 17:48:43.724693 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:44 crc kubenswrapper[4702]: I1124 17:48:44.726022 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:44 crc kubenswrapper[4702]: I1124 17:48:44.727145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:44 crc kubenswrapper[4702]: I1124 17:48:44.727202 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:44 crc kubenswrapper[4702]: I1124 17:48:44.727214 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:45 crc kubenswrapper[4702]: I1124 17:48:45.589735 4702 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 24 17:48:45 crc kubenswrapper[4702]: W1124 17:48:45.638682 4702 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 24 17:48:45 crc kubenswrapper[4702]: I1124 17:48:45.638886 4702 trace.go:236] Trace[75200837]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:48:35.637) (total time: 10001ms): Nov 24 17:48:45 crc 
kubenswrapper[4702]: Trace[75200837]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:48:45.638)
Nov 24 17:48:45 crc kubenswrapper[4702]: Trace[75200837]: [10.00138822s] [10.00138822s] END
Nov 24 17:48:45 crc kubenswrapper[4702]: E1124 17:48:45.638934 4702 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 24 17:48:46 crc kubenswrapper[4702]: I1124 17:48:46.094532 4702 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 24 17:48:46 crc kubenswrapper[4702]: I1124 17:48:46.094624 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 24 17:48:46 crc kubenswrapper[4702]: I1124 17:48:46.101675 4702 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 24 17:48:46 crc kubenswrapper[4702]: I1124 17:48:46.101789 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 24 17:48:47 crc kubenswrapper[4702]: I1124 17:48:47.963415 4702 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 24 17:48:47 crc kubenswrapper[4702]: I1124 17:48:47.963507 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.593035 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.593857 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.595778 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.595894 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.595911 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.607077 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.739061 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.740724 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.740828 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:48:48 crc kubenswrapper[4702]: I1124 17:48:48.740852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.057422 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.057636 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.059004 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.059076 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.059098 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.063251 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.742073 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.745002 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.745045 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:48:49 crc kubenswrapper[4702]: I1124 17:48:49.745055 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:48:50 crc kubenswrapper[4702]: I1124 17:48:50.689586 4702 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.096945 4702 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.099377 4702 trace.go:236] Trace[1730239782]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:48:40.241) (total time: 10857ms):
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[1730239782]: ---"Objects listed" error: 10857ms (17:48:51.099)
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[1730239782]: [10.857658935s] [10.857658935s] END
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.099415 4702 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.100343 4702 trace.go:236] Trace[1787947008]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:48:40.583) (total time: 10516ms):
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[1787947008]: ---"Objects listed" error: 10516ms (17:48:51.100)
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[1787947008]: [10.516665977s] [10.516665977s] END
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.100374 4702 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.100501 4702 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.101026 4702 trace.go:236] Trace[496035296]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:48:38.825) (total time: 12275ms):
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[496035296]: ---"Objects listed" error: 12275ms (17:48:51.100)
Nov 24 17:48:51 crc kubenswrapper[4702]: Trace[496035296]: [12.275140393s] [12.275140393s] END
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.101048 4702 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.101095 4702 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.140102 4702 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:45858->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.140170 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:45858->192.168.126.11:17697: read: connection reset by peer"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.140537 4702 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.140580 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.579683 4702 apiserver.go:52] "Watching apiserver"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.583029 4702 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.583354 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-swrxh","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.583706 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.583724 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.584389 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.586020 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.586326 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.586545 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.586693 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.589074 4702 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.589139 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.589387 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.589994 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.590111 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.590173 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592189 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-swrxh"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592260 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592357 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592435 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.592493 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592269 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.592558 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.593964 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.594058 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.594116 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.594618 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.595026 4702 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603031 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603066 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603086 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603104 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603196 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603214 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603230 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603247 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603265 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603279 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603296 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603313 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603326 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603343 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603359 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603375 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603390 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603492 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603547 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603564 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603580 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603619 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603632 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603646 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603638 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603706 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603770 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603821 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603845 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603865 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603882 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603884 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603926 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603942 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603957 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.603994 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604010 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604026 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604045 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604062 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604080 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604095 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604112 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604371 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604386 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604419 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604436 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604467 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604487 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604507 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604528 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604551 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604573 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604592 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604613 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604629 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604681 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604697 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604712 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604741 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604757 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604773 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604790 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604822 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604837 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604850 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604865 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604880 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604915 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604948 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604969 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604987 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605004 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605021 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605041 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605055 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605070 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605085 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605104 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605120 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605135 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605150 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605171 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605188 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605204 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605220 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605237 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605272 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605290 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605309 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605324 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605339 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605355 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605370 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605385 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605401 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605423 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605438 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605454 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605470 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605487 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605502 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605518 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605535 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605550 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605566 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605583 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605598 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605616 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605631 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605647 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605665 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605687 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605705 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605725 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605754 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605770 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605786 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605818 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605834 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605856 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605874 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605889 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605909 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605930 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605952 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605989 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606006 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606022 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606040 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606056 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606072 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606089 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606113 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606139 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606162 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606184 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606215 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606242 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606265 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606286 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606306 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606329 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606352 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606374 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606423 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606444 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606460 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606477 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606494 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\"
(UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606511 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606529 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606546 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606561 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606577 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606592 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606608 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606624 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606640 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606658 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606674 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606690 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606705 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606720 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606736 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606753 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606773 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606790 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604093 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606837 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.604578 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606864 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605268 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605262 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.605779 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606783 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606793 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.608651 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.608837 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.608903 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.609083 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.609112 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.609310 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.610079 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.610174 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). 
InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.610270 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.612386 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.612979 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.613001 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.613557 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.613966 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614009 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614164 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614190 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614265 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614282 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614340 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614530 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614640 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614774 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614906 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.614976 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615050 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615065 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615091 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615144 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615176 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615296 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615340 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615381 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615400 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615406 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615545 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615610 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615630 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615898 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.615902 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.616012 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.616168 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.616761 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.616835 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.617033 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.617352 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.617636 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.618310 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.618455 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.618668 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.618715 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.618827 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.619388 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.619471 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.619670 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.619905 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.620238 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.620167 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.620460 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.620472 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.620867 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.621125 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.621174 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.621668 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.621853 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.621865 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.622115 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.622605 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.622781 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.622923 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.623292 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.623486 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.623596 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.623937 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.624253 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.624305 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:48:52.12427727 +0000 UTC m=+21.365018504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.628552 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.628900 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.629645 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630013 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630057 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630276 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630348 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630496 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630727 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.630953 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631006 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.606888 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631099 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631123 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631132 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631144 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631136 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631167 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631185 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631195 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631204 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631318 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631406 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631450 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631494 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631520 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631546 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631569 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631582 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631592 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631619 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631642 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631654 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631666 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631691 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631772 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631818 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631803 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631883 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631906 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631923 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631940 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631957 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631974 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631992 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632007 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632049 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" 
(UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632080 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632104 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632135 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhkh8\" (UniqueName: \"kubernetes.io/projected/72362b16-8d6f-45db-abfa-a416b2d8d60c-kube-api-access-fhkh8\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632156 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632174 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632195 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632212 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632230 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632248 4702 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632264 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/72362b16-8d6f-45db-abfa-a416b2d8d60c-hosts-file\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632280 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632296 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632313 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632329 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632347 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632451 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632462 4702 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632473 4702 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" 
(UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632484 4702 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632493 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632502 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632511 4702 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632520 4702 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632529 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632538 4702 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632548 4702 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632558 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632566 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632575 4702 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632585 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632597 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632606 4702 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632614 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632624 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632633 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632642 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632650 4702 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632659 4702 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632668 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632679 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632688 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632696 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632705 4702 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632714 4702 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632723 4702 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632732 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632742 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632751 4702 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632759 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632768 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632778 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632787 4702 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632800 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632831 4702 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632841 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632851 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632859 4702 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632868 4702 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632877 4702 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632886 4702 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633079 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633098 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633844 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.634976 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635003 4702 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635029 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635044 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635056 4702 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635067 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635076 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635087 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635100 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635115 4702 
reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635130 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635142 4702 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635152 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635161 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635170 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635178 4702 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635191 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635202 4702 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635214 4702 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635226 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635238 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635249 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635259 4702 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635268 4702 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635277 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635286 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635294 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635304 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635321 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635334 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635345 4702 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635357 4702 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635370 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635382 4702 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635394 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635405 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635418 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635430 4702 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635441 4702 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635453 4702 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635464 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635476 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635491 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635504 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635516 4702 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635532 4702 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635545 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635559 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635571 4702 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635584 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635597 4702 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635610 4702 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635622 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635635 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635647 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.631889 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632073 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632083 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632094 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632290 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632350 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.632532 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633209 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633473 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.633645 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.634997 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635073 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635568 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635613 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635642 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635831 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635904 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.635941 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.636259 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.636499 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.636662 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.636688 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.636776 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.638445 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.638854 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.638927 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.639134 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.639416 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.639597 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.639862 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.640238 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.640538 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.640652 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.640829 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.640864 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.641110 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.641370 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.641509 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.641730 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.641758 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.642170 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.642254 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.642620 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.643126 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.643163 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.643505 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.643686 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.643946 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.644170 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.644251 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.644249 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.644656 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.644764 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.645187 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.645362 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.646665 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.651237 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.651358 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.651907 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.652246 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.652490 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.652781 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.653020 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.653616 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.654114 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.654326 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.654388 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:52.154369608 +0000 UTC m=+21.395110812 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.654498 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.657837 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.658761 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.658888 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:52.158837542 +0000 UTC m=+21.399578706 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.662706 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.659755 4702 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.665115 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.668908 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.671608 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.671984 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.674461 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.675159 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.676455 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.676773 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.678833 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.678983 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.679104 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.679243 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:52.179224739 +0000 UTC m=+21.419965903 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.680917 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.675782 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.681589 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.682517 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.682600 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.682678 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:51 crc kubenswrapper[4702]: E1124 17:48:51.682767 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:52.182745024 +0000 UTC m=+21.423486188 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.683410 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.684004 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.684458 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.686984 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.688322 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.689234 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.689515 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.690488 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.690486 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699516 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.690508 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699352 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699698 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699717 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699791 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699533 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699847 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.699936 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.700068 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.700081 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.700091 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.701074 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.701475 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.701981 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.703354 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.704252 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.708040 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.712388 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.713260 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.714271 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.716370 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.723700 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.725785 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.727868 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.728065 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.728646 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.729179 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.729287 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.730422 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.731057 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.732069 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.732472 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.733132 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.734287 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.734496 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.734876 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.735949 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736007 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736049 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/72362b16-8d6f-45db-abfa-a416b2d8d60c-hosts-file\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736107 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736143 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhkh8\" (UniqueName: \"kubernetes.io/projected/72362b16-8d6f-45db-abfa-a416b2d8d60c-kube-api-access-fhkh8\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " 
pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736188 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736198 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736199 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736208 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736257 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736273 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736286 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736298 4702 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736311 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736323 4702 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736335 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736390 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736424 4702 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736445 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.736997 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/72362b16-8d6f-45db-abfa-a416b2d8d60c-hosts-file\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737026 4702 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737125 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737155 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737168 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737182 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737196 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737210 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737222 4702 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737236 4702 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737256 4702 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" 
Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737268 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737281 4702 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737293 4702 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737304 4702 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737316 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737328 4702 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737339 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737350 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737362 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737374 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737387 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737401 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737413 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737425 4702 
reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737440 4702 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737452 4702 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737464 4702 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737476 4702 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737488 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737501 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737514 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737527 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737440 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737539 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737561 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737573 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737588 4702 reconciler_common.go:293] 
"Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737622 4702 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737635 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737647 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737659 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737672 4702 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737686 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737699 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737712 4702 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737723 4702 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737735 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737749 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737761 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737773 4702 
reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737785 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737801 4702 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737829 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737843 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737856 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737869 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737883 4702 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737894 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737906 4702 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737918 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737930 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737971 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 24 
17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.737988 4702 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738000 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738013 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738026 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738056 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738069 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738081 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738094 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738105 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738117 4702 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738131 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738135 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738143 4702 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 
17:48:51.738156 4702 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738169 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738180 4702 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738193 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738233 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738246 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738260 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738272 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.738722 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.739926 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.740494 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.742318 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.742855 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.743909 4702 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" 
path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.744037 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.746193 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.746926 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.748020 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.749125 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.749656 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.751329 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.752035 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.753696 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.754502 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.755551 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.756190 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.757002 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 24 17:48:51 crc 
kubenswrapper[4702]: I1124 17:48:51.758008 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.758153 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.758900 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.759979 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.760470 4702 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc" exitCode=255 Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.760549 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.760640 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhkh8\" (UniqueName: \"kubernetes.io/projected/72362b16-8d6f-45db-abfa-a416b2d8d60c-kube-api-access-fhkh8\") pod \"node-resolver-swrxh\" (UID: \"72362b16-8d6f-45db-abfa-a416b2d8d60c\") " pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.761586 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.762533 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.763605 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.764201 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.764766 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.766021 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.766664 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.767759 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.768360 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc"} Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.768415 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-wmjst"] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.768754 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-wtx9m"] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.769400 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.769742 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-8g6cn"] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.770385 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.770469 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.771951 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.776972 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.780053 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.780266 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.781182 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.781791 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.782857 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.783222 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.783388 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.783466 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.783585 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.784582 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.784842 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.787636 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.799557 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.810827 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.820272 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.829390 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.838955 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-daemon-config\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839004 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-proxy-tls\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839026 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-cni-binary-copy\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839049 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-netns\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839097 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4rff\" (UniqueName: \"kubernetes.io/projected/f4859751-212a-4d94-b0c7-875b1da99cd8-kube-api-access-f4rff\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839123 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-hostroot\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839147 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839167 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nsh2\" (UniqueName: \"kubernetes.io/projected/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-kube-api-access-6nsh2\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839185 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-multus-certs\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839217 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-rootfs\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839240 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839260 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839281 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-cnibin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839303 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-bin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839334 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-etc-kubernetes\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839416 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-os-release\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839486 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp9c6\" (UniqueName: \"kubernetes.io/projected/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-kube-api-access-hp9c6\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839565 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-multus\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839619 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-system-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839646 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-os-release\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839670 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-system-cni-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839693 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-kubelet\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839717 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cnibin\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839742 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-mcd-auth-proxy-config\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " 
pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839763 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-socket-dir-parent\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839783 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-conf-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839846 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-k8s-cni-cncf-io\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.839875 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-binary-copy\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.841354 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.852303 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.865512 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.870490 4702 scope.go:117] "RemoveContainer" containerID="e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.870797 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.880304 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.892910 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.902781 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.904438 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.914517 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: W1124 17:48:51.918692 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-0dafdf37882b5e1a41e3f88b2bea7f7278cb408e45c3f03b51aa7a70c9ecaa72 WatchSource:0}: Error finding container 0dafdf37882b5e1a41e3f88b2bea7f7278cb408e45c3f03b51aa7a70c9ecaa72: Status 404 returned error can't find the container with id 0dafdf37882b5e1a41e3f88b2bea7f7278cb408e45c3f03b51aa7a70c9ecaa72 Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.924739 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.929292 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.939728 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.940976 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-proxy-tls\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941024 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-cni-binary-copy\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941051 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-netns\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941076 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4rff\" (UniqueName: \"kubernetes.io/projected/f4859751-212a-4d94-b0c7-875b1da99cd8-kube-api-access-f4rff\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " 
pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941102 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-hostroot\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941126 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941178 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nsh2\" (UniqueName: \"kubernetes.io/projected/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-kube-api-access-6nsh2\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941204 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-multus-certs\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941261 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941284 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941309 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-rootfs\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941327 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-cnibin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941350 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-bin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 
17:48:51.941382 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-etc-kubernetes\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941405 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-os-release\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941438 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp9c6\" (UniqueName: \"kubernetes.io/projected/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-kube-api-access-hp9c6\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941463 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-multus\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941484 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-system-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941508 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-os-release\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941539 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-system-cni-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941571 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-kubelet\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941593 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cnibin\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941617 4702 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-mcd-auth-proxy-config\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941640 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-socket-dir-parent\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941665 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-conf-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941697 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-k8s-cni-cncf-io\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941719 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-binary-copy\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.941738 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-daemon-config\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.942569 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-daemon-config\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.943703 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-os-release\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.943773 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-netns\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.943960 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-cnibin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.943986 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-kubelet\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944066 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-bin\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944106 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-etc-kubernetes\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944141 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cnibin\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944175 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-k8s-cni-cncf-io\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944208 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-conf-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944206 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-var-lib-cni-multus\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944251 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-system-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944310 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-os-release\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944346 4702 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-system-cni-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944537 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-cni-dir\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944588 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-hostroot\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944601 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944623 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-rootfs\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944658 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-host-run-multus-certs\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944670 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.944743 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4859751-212a-4d94-b0c7-875b1da99cd8-multus-socket-dir-parent\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.945157 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-cni-binary-copy\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.945378 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-mcd-auth-proxy-config\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.945495 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4859751-212a-4d94-b0c7-875b1da99cd8-cni-binary-copy\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.947570 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-proxy-tls\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.970084 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.982927 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.989485 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4rff\" (UniqueName: \"kubernetes.io/projected/f4859751-212a-4d94-b0c7-875b1da99cd8-kube-api-access-f4rff\") pod \"multus-8g6cn\" (UID: \"f4859751-212a-4d94-b0c7-875b1da99cd8\") " pod="openshift-multus/multus-8g6cn" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.997045 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nsh2\" (UniqueName: \"kubernetes.io/projected/9a77fa32-4f49-4b02-ac4a-fbad4d33e499-kube-api-access-6nsh2\") pod \"machine-config-daemon-wmjst\" (UID: \"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\") " pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:51 crc kubenswrapper[4702]: I1124 17:48:51.997634 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp9c6\" (UniqueName: \"kubernetes.io/projected/8b4fc11b-9bbd-42a2-9472-0c486ca426b3-kube-api-access-hp9c6\") pod \"multus-additional-cni-plugins-wtx9m\" (UID: \"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\") " pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.011028 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-swrxh" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.021117 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.091207 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.094995 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8g6cn" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.099852 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.108440 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:48:52 crc kubenswrapper[4702]: W1124 17:48:52.128630 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4859751_212a_4d94_b0c7_875b1da99cd8.slice/crio-a0bc49dffb7377270a89fa9545b692923cd8bd70bc14c167614fb743f8caa324 WatchSource:0}: Error finding container a0bc49dffb7377270a89fa9545b692923cd8bd70bc14c167614fb743f8caa324: Status 404 returned error can't find the container with id a0bc49dffb7377270a89fa9545b692923cd8bd70bc14c167614fb743f8caa324 Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.146352 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.146554 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:48:53.146520614 +0000 UTC m=+22.387261788 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.158836 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-f5g6n"] Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.159651 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163287 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163544 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163684 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163736 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163825 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.163977 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.164166 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.172306 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.185141 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.194613 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.204324 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.215418 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.228016 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.243530 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.249916 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.249968 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.249998 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250026 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250051 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250075 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250092 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250091 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250121 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250136 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250138 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250175 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250187 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:53.250170845 +0000 UTC m=+22.490912019 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250199 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250218 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250235 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250253 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250268 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250281 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250294 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250313 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wkcd\" (UniqueName: 
\"kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250331 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250346 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250380 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250396 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250414 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250430 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.250452 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250513 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250559 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:52 crc 
kubenswrapper[4702]: E1124 17:48:52.250564 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250569 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250587 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250608 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:53.250600897 +0000 UTC m=+22.491342061 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250622 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:53.250616438 +0000 UTC m=+22.491357592 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:52 crc kubenswrapper[4702]: E1124 17:48:52.250644 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:53.250634168 +0000 UTC m=+22.491375332 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.255422 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.279197 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.296294 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.309773 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.321486 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.353881 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.353940 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.353957 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.353972 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.353988 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354013 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354028 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354045 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354060 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354082 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354102 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354121 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354136 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354150 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354166 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wkcd\" (UniqueName: \"kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354187 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354204 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354219 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354236 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354250 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354880 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.354966 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355055 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355096 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355111 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355133 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch\") pod 
\"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355136 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355160 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355168 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355191 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355216 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355222 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355219 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355230 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355329 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 
17:48:52.355350 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355927 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.355991 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.362666 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.375396 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wkcd\" (UniqueName: \"kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd\") pod \"ovnkube-node-f5g6n\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.537577 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n"
Nov 24 17:48:52 crc kubenswrapper[4702]: W1124 17:48:52.550945 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4b86a8_9180_41ee_b240_0071bdc994da.slice/crio-51e341ebc0666948185e993ba5f1faaab024c08cd1cc3ba2c47a14b6bf087568 WatchSource:0}: Error finding container 51e341ebc0666948185e993ba5f1faaab024c08cd1cc3ba2c47a14b6bf087568: Status 404 returned error can't find the container with id 51e341ebc0666948185e993ba5f1faaab024c08cd1cc3ba2c47a14b6bf087568
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.763767 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"391e1c53474a2c726c062224141526fa6a1db8645b51c16a67d5f5ce7608eff7"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.765012 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.765036 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"0dafdf37882b5e1a41e3f88b2bea7f7278cb408e45c3f03b51aa7a70c9ecaa72"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.767482 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742" exitCode=0
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.767561 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.767593 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"51e341ebc0666948185e993ba5f1faaab024c08cd1cc3ba2c47a14b6bf087568"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.769414 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-swrxh" event={"ID":"72362b16-8d6f-45db-abfa-a416b2d8d60c","Type":"ContainerStarted","Data":"8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.769488 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-swrxh" event={"ID":"72362b16-8d6f-45db-abfa-a416b2d8d60c","Type":"ContainerStarted","Data":"d3facad56adade8b49f503dee4a7194cd98ef2b0557881a23c08a104d58c9283"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.771050 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.771097 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.771108 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"df9bff62bc0f3a61c0f85bb8003dc3641e5b3eb993189410312999c9b1e6cdca"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.772921 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.772942 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.772952 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"b44b8895a3764d980fa372f77d1d9f4e4ecf51813aeee39ba8ce5872dd3323ab"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.775693 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.779555 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.779742 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.781966 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4" exitCode=0
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.782057 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.782093 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerStarted","Data":"a38b15686276a12ea7a94e6e401b451c6576c1ff2b19eff856e3796f64e6e365"}
Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.784207 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn"
event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerStarted","Data":"87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1"} Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.784264 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerStarted","Data":"a0bc49dffb7377270a89fa9545b692923cd8bd70bc14c167614fb743f8caa324"} Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.800136 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.816310 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.828712 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.841978 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.855764 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.868334 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.880071 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.890556 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.902328 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.916497 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.929036 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.941602 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.958145 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.972947 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:52 crc kubenswrapper[4702]: I1124 17:48:52.988712 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.005642 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.031726 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.045398 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.059552 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.071335 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.090229 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.108061 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.126457 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.143605 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.160026 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.160194 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:48:55.1601727 +0000 UTC m=+24.400913864 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.261548 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.261615 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.261648 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 
17:48:53.261675 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261739 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261758 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261779 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261792 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261796 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:55.26178037 +0000 UTC m=+24.502521534 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261857 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:55.261846981 +0000 UTC m=+24.502588135 (durationBeforeRetry 2s). 
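The "Operation for ... failed. No retries permitted until ... (durationBeforeRetry 2s)" entries above are kubelet's per-operation exponential backoff for volume mounts and unmounts: every consecutive failure of the same operation doubles the wait before the next attempt is permitted. A minimal Go sketch of that pattern follows; the 500ms initial delay and the 2-minute cap are illustrative assumptions, not kubelet's exact constants (the real logic lives in kubelet's nestedpendingoperations code).

    package main

    import (
        "fmt"
        "time"
    )

    // Per-operation exponential backoff: the wait doubles after every
    // consecutive failure, up to a cap. Constants are illustrative.
    const (
        initialWait = 500 * time.Millisecond // assumption, not kubelet's exact value
        maxWait     = 2 * time.Minute        // assumption, not kubelet's exact value
    )

    type backoff struct {
        lastFailure time.Time
        wait        time.Duration
    }

    // retryAllowed reports whether the backoff window since the last
    // failure has elapsed ("No retries permitted until ...").
    func (b *backoff) retryAllowed(now time.Time) bool {
        return now.After(b.lastFailure.Add(b.wait))
    }

    // recordFailure notes a failed attempt and doubles the next wait.
    func (b *backoff) recordFailure(now time.Time) {
        switch {
        case b.wait == 0:
            b.wait = initialWait
        case b.wait*2 <= maxWait:
            b.wait *= 2
        default:
            b.wait = maxWait
        }
        b.lastFailure = now
    }

    func main() {
        b := &backoff{}
        now := time.Now()
        for i := 1; i <= 4; i++ {
            b.recordFailure(now)
            // The third consecutive failure reaches a 2s wait, matching
            // the "durationBeforeRetry 2s" recorded in the log above.
            fmt.Printf("failure %d -> next retry in %v\n", i, b.wait)
        }
        fmt.Println("retry allowed immediately?", b.retryAllowed(now)) // false
    }
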
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261858 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261896 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261929 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261942 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261969 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:55.261939254 +0000 UTC m=+24.502680458 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.261995 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:55.261985956 +0000 UTC m=+24.502727200 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.638356 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-xprzv"] Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.639129 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.641208 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.641208 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.641252 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.641683 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.648119 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.648149 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.648232 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.648246 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.648391 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:48:53 crc kubenswrapper[4702]: E1124 17:48:53.648492 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
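The "NetworkReady=false ... no CNI configuration file in /etc/kubernetes/cni/net.d/" errors mean the container runtime has not yet found any CNI network definition, so kubelet refuses to start pods that need the pod network (host-network pods such as ovnkube-node still run, which is how the network plugin bootstraps itself). A rough sketch of that readiness probe, assuming the runtime simply scans its CNI config directory for the conventional file extensions (the exact cri-o/kubelet logic differs in detail):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cniConfigPresent reports whether confDir holds any CNI network
    // definition, using the conventional file extensions.
    func cniConfigPresent(confDir string) bool {
        for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
            matches, err := filepath.Glob(filepath.Join(confDir, pattern))
            if err == nil && len(matches) > 0 {
                return true
            }
        }
        return false
    }

    func main() {
        confDir := "/etc/kubernetes/cni/net.d/" // directory named in the log
        if _, err := os.Stat(confDir); err != nil || !cniConfigPresent(confDir) {
            fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
            os.Exit(1)
        }
        fmt.Println("NetworkReady=true")
    }
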
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.654400 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.670433 4702 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.690065 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.700899 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.711594 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.722437 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.736246 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.750736 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.765898 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57582220-95af-4697-9fa9-76e9cf03c15a-serviceca\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.765958 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxgp5\" (UniqueName: \"kubernetes.io/projected/57582220-95af-4697-9fa9-76e9cf03c15a-kube-api-access-kxgp5\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.765982 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/57582220-95af-4697-9fa9-76e9cf03c15a-host\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.766042 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.788615 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789529 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789582 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789592 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789601 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789609 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.789616 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.791595 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228" exitCode=0 Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.791680 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228"} Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.801648 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.816096 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\
\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.830881 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc 
kubenswrapper[4702]: I1124 17:48:53.861542 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.866477 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57582220-95af-4697-9fa9-76e9cf03c15a-serviceca\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.866555 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxgp5\" (UniqueName: \"kubernetes.io/projected/57582220-95af-4697-9fa9-76e9cf03c15a-kube-api-access-kxgp5\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.866580 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/57582220-95af-4697-9fa9-76e9cf03c15a-host\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.866657 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host\" (UniqueName: \"kubernetes.io/host-path/57582220-95af-4697-9fa9-76e9cf03c15a-host\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.879812 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.883781 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57582220-95af-4697-9fa9-76e9cf03c15a-serviceca\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.899218 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.901262 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxgp5\" (UniqueName: \"kubernetes.io/projected/57582220-95af-4697-9fa9-76e9cf03c15a-kube-api-access-kxgp5\") pod \"node-ca-xprzv\" (UID: \"57582220-95af-4697-9fa9-76e9cf03c15a\") " pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.914397 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.929448 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.946016 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:53 crc kubenswrapper[4702]: I1124 17:48:53.963114 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.007217 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\
\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.028855 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.047906 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-xprzv" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.048770 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.064185 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: W1124 17:48:54.071326 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57582220_95af_4697_9fa9_76e9cf03c15a.slice/crio-51092b321482126c32e241519033be1a936524241e94707599c9bca55dae8582 WatchSource:0}: Error finding container 51092b321482126c32e241519033be1a936524241e94707599c9bca55dae8582: Status 404 returned error can't find the container with id 51092b321482126c32e241519033be1a936524241e94707599c9bca55dae8582 Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.089706 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.101509 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.795266 4702 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-image-registry/node-ca-xprzv" event={"ID":"57582220-95af-4697-9fa9-76e9cf03c15a","Type":"ContainerStarted","Data":"26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad"} Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.795703 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-xprzv" event={"ID":"57582220-95af-4697-9fa9-76e9cf03c15a","Type":"ContainerStarted","Data":"51092b321482126c32e241519033be1a936524241e94707599c9bca55dae8582"} Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.796523 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc"} Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.799355 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99" exitCode=0 Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.799412 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99"} Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.814035 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.830534 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.850021 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.865987 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.880907 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.899232 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.912414 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.926987 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\
\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.938946 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.950792 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.961478 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.968391 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.971957 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.972355 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.977039 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.983301 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:54 crc kubenswrapper[4702]: I1124 17:48:54.994847 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.007902 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.022931 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.036507 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.047906 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.056784 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.066213 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.077986 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615
f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.089031 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.098272 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.115723 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.129328 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.145615 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.169464 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.190924 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.191007 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:48:59.190990247 +0000 UTC m=+28.431731411 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.292434 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.292490 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.292517 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 
17:48:55.292552 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292639 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292682 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292698 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:59.292682389 +0000 UTC m=+28.533423553 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292695 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292792 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:59.292773652 +0000 UTC m=+28.533514906 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292844 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292869 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.292920 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:59.292910266 +0000 UTC m=+28.533651420 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.293107 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.293127 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.293139 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.293183 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:48:59.293170903 +0000 UTC m=+28.533912067 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.647504 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.647553 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.647513 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.647635 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.647783 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:48:55 crc kubenswrapper[4702]: E1124 17:48:55.647914 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.807433 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f"} Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.810328 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4" exitCode=0 Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.810391 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4"} Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.823525 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.836928 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.846581 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.864972 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.876657 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.889851 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.903941 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.921019 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.932551 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.944142 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.958120 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.969009 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.981791 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:55 crc kubenswrapper[4702]: I1124 17:48:55.991253 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.817677 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494" exitCode=0 Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.817724 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494"} Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.834393 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.851927 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.864267 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.875767 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.889239 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615
f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.901509 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.915967 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.939094 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.963158 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:56 crc kubenswrapper[4702]: I1124 17:48:56.980072 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.001226 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.035083 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.072120 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.087715 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.502021 4702 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.504922 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.504966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.504979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.505152 4702 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.525164 4702 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.525616 4702 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.527285 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.527310 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.527325 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.527346 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.527359 4702 setters.go:603] 
"Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.540887 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.544915 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.544992 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.545006 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.545027 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.545042 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.558271 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.563057 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.563107 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.563118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.563137 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.563148 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.575331 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.579139 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.579183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.579195 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.579218 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.579236 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.592129 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.596835 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.596888 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.596900 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.596921 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.596932 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.610930 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.611080 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.613025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.613060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.613071 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.613090 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.613106 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.647919 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.648225 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.648370 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.648359 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.648563 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:48:57 crc kubenswrapper[4702]: E1124 17:48:57.648656 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.715694 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.715889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.715979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.716090 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.716167 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.818582 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.819073 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.819088 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.819111 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.819125 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.824945 4702 generic.go:334] "Generic (PLEG): container finished" podID="8b4fc11b-9bbd-42a2-9472-0c486ca426b3" containerID="91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498" exitCode=0 Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.825035 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerDied","Data":"91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498"} Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.842129 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.858517 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.875641 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.891412 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.908249 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.921922 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.922149 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.922226 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.922249 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.922275 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.922291 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:57Z","lastTransitionTime":"2025-11-24T17:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.938905 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.951881 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.961578 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.972201 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:57 crc kubenswrapper[4702]: I1124 17:48:57.987331 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.000257 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.014511 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.024655 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.024681 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.024692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.024709 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.024722 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.034178 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427
275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.127089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.127241 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.127301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.127366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.127425 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.229779 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.229876 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.229890 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.229915 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.229931 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.332843 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.332908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.332928 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.332951 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.332968 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.436436 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.436464 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.436472 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.436486 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.436495 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.539458 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.539495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.539504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.539516 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.539525 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.642842 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.642908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.642920 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.642942 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.642956 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.746981 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.747050 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.747070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.747098 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.747122 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.835518 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.836418 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.842946 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" event={"ID":"8b4fc11b-9bbd-42a2-9472-0c486ca426b3","Type":"ContainerStarted","Data":"aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.849444 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.849500 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.849516 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.849542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.849562 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.855902 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.875234 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.891199 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.903457 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.904732 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.928826 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\
\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.944793 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.952224 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.952294 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.952310 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.952337 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.952357 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:58Z","lastTransitionTime":"2025-11-24T17:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.958307 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.973264 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:58 crc kubenswrapper[4702]: I1124 17:48:58.988720 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.004204 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.017588 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.029137 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.038936 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.048580 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.055555 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.055596 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.055608 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.055624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.055636 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.062419 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.073727 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.084771 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.101025 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c79571
42c7b20b7dfbef93a7b764d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.115334 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.128445 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.143409 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af
53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.154263 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53
Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.157484 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.157543 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.157565 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.157580 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.157591 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.167340 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.179346 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.190764 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.201460 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.210134 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.220313 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.234766 4702 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.234944 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.234916842 +0000 UTC m=+36.475658006 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.260555 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.260615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.260629 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.260649 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.260661 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.335918 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.335990 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.336027 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.336066 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336166 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336226 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336247 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336263 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.336242493 +0000 UTC m=+36.576983657 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336266 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336270 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336308 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.336297035 +0000 UTC m=+36.577038199 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336321 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336378 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.336352766 +0000 UTC m=+36.577094090 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336392 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336414 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.336513 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.33648524 +0000 UTC m=+36.577226404 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.363258 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.363298 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.363307 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.363321 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.363330 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.465924 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.465972 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.465985 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.466002 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.466015 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.569697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.569787 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.569847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.569888 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.569917 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.647259 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.647457 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.647570 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.647727 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.647930 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:48:59 crc kubenswrapper[4702]: E1124 17:48:59.648088 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.672535 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.672571 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.672582 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.672598 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.672607 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.775516 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.775563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.775576 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.775593 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.775629 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.846053 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.846119 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.871061 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.877862 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.877928 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.877944 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.877972 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.877993 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.886911 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.900865 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.916061 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.930340 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.944030 4702 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.960507 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.974848 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.980640 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.980696 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.980708 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.980741 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.980756 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:48:59Z","lastTransitionTime":"2025-11-24T17:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.987903 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:48:59 crc kubenswrapper[4702]: I1124 17:48:59.999148 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:48:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.011073 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.027448 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.042062 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.057854 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.076493 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c79571
42c7b20b7dfbef93a7b764d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.083393 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.083436 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.083447 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.083465 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.083479 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.185500 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.185553 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.185567 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.185586 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.185601 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.288710 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.288778 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.288837 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.288869 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.288891 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.391835 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.391880 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.391889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.391909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.391919 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.495033 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.495096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.495115 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.495141 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.495162 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.598795 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.598882 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.598895 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.598913 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.598926 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.702034 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.702123 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.702135 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.702155 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.702170 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.804319 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.804370 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.804384 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.804408 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.804422 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.907299 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.907371 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.907382 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.907405 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:00 crc kubenswrapper[4702]: I1124 17:49:00.907417 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:00Z","lastTransitionTime":"2025-11-24T17:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.009729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.009786 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.009817 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.009837 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.009846 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.112485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.112558 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.112572 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.112596 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.112612 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.214925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.214977 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.214990 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.215007 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.215018 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.317263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.317300 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.317309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.317322 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.317332 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.418873 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.418899 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.418912 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.418925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.418935 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.522097 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.522380 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.522449 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.522546 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.522606 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.625847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.626614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.626694 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.626740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.626760 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.647255 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.647355 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:01 crc kubenswrapper[4702]: E1124 17:49:01.647425 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:49:01 crc kubenswrapper[4702]: E1124 17:49:01.647491 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.647550 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:01 crc kubenswrapper[4702]: E1124 17:49:01.647597 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.662432 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"container
ID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\"
:[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 
17:49:01.673407 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.689481 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.709040 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 
2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.721884 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.729025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.729061 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.729071 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.729087 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.729097 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.731711 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.743290 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.755376 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.769741 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615
f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.782210 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.795014 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.812073 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.826579 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.830936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.830977 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.830985 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.830999 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.831008 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.840695 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.933289 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.933336 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.933348 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.933366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:01 crc kubenswrapper[4702]: I1124 17:49:01.933379 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:01Z","lastTransitionTime":"2025-11-24T17:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.035673 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.035712 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.035723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.035740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.035752 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.138005 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.138050 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.138062 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.138078 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.138090 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.240148 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.240188 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.240195 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.240208 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.240219 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.343096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.343145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.343156 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.343173 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.343185 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.444860 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.444901 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.444912 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.444929 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.444941 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.546974 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.547040 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.547057 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.547078 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.547092 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.649485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.649516 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.649533 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.649545 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.649553 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.752031 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.752073 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.752083 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.752097 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.752107 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.855201 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.855250 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.855261 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.855306 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.855318 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.856715 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/0.log" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.858830 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4" exitCode=1 Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.858865 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.859448 4702 scope.go:117] "RemoveContainer" containerID="7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.872382 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.886314 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.896096 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.907198 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.918792 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.929613 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.940645 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615
f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.951772 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.958915 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.958956 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.958966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.958981 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.958991 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:02Z","lastTransitionTime":"2025-11-24T17:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.965068 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.982621 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c79571
42c7b20b7dfbef93a7b764d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7b
c3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:02 crc kubenswrapper[4702]: I1124 17:49:02.996793 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.009503 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.025297 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.037852 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.061714 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.061753 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.061761 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.061775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.061785 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.164790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.164864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.164873 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.164890 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.164904 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.267589 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.267640 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.267649 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.267662 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.267671 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.370304 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.370338 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.370346 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.370359 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.370369 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.472466 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.472510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.472520 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.472533 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.472542 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.575544 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.575586 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.575602 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.575620 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.575630 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.647924 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.647942 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.647996 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:03 crc kubenswrapper[4702]: E1124 17:49:03.648151 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:03 crc kubenswrapper[4702]: E1124 17:49:03.648221 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:03 crc kubenswrapper[4702]: E1124 17:49:03.648319 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.678319 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.678368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.678379 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.678397 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.678410 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.780748 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.780822 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.780836 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.780850 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.780859 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.863488 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/0.log" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.865698 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.866096 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.878380 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.882583 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.882630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.882644 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.882661 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.882672 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.890551 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.901489 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.912885 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615
f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.926534 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.939769 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.960783 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.974132 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.985474 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.985516 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.985525 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.985538 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.985550 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:03Z","lastTransitionTime":"2025-11-24T17:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:03 crc kubenswrapper[4702]: I1124 17:49:03.989449 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.007923 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f
841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.025485 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.043087 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.080464 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.087964 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.088028 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.088052 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.088074 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.088086 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.095974 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.190521 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.190568 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.190580 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.190595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.190607 4702 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.293519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.293577 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.293594 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.293612 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.293623 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.396185 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.396235 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.396246 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.396263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.396274 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.401702 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9"] Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.402184 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.404873 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.405123 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.419024 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d0
3aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.431029 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.442719 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.464707 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f0919
17c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.476122 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.489303 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-528ng\" (UniqueName: \"kubernetes.io/projected/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-kube-api-access-528ng\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.489472 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cni
bin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.489526 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.489749 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.489899 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.498906 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.498946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 
crc kubenswrapper[4702]: I1124 17:49:04.498959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.498975 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.498986 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.505103 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f8
41598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.515966 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.528935 4702 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.541089 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.552460 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.564896 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.575270 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.585966 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.591229 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-528ng\" (UniqueName: \"kubernetes.io/projected/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-kube-api-access-528ng\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.591271 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.591291 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.591328 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.592141 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.592181 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.599064 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.599154 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.601309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.601344 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.601358 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: 
I1124 17:49:04.601375 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.601385 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.611071 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-528ng\" (UniqueName: \"kubernetes.io/projected/e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d-kube-api-access-528ng\") pod \"ovnkube-control-plane-749d76644c-6vrt9\" (UID: \"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.704688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.704715 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.704723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.704735 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.704743 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.716228 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" Nov 24 17:49:04 crc kubenswrapper[4702]: W1124 17:49:04.727893 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3bd7cdb_7d41_4a5a_8e56_3bb3e48af13d.slice/crio-295ff1d0013efb80886c7ce0b4a96b9bc59db914c0eae2e0487d8ca94853098a WatchSource:0}: Error finding container 295ff1d0013efb80886c7ce0b4a96b9bc59db914c0eae2e0487d8ca94853098a: Status 404 returned error can't find the container with id 295ff1d0013efb80886c7ce0b4a96b9bc59db914c0eae2e0487d8ca94853098a Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.806793 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.806866 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.806877 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.806912 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.806924 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.872484 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" event={"ID":"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d","Type":"ContainerStarted","Data":"295ff1d0013efb80886c7ce0b4a96b9bc59db914c0eae2e0487d8ca94853098a"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.875216 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/1.log" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.876043 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/0.log" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.879588 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab" exitCode=1 Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.879629 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.879684 4702 scope.go:117] "RemoveContainer" containerID="7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.880380 4702 scope.go:117] "RemoveContainer" containerID="d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab" Nov 24 17:49:04 crc kubenswrapper[4702]: E1124 17:49:04.880616 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.892275 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.906044 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.909867 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.909896 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.909907 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.909923 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.909934 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:04Z","lastTransitionTime":"2025-11-24T17:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.917865 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.929397 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.940394 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.953346 4702 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.966336 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:04 crc kubenswrapper[4702]: I1124 17:49:04.978391 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.000403 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 
17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\
\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.012026 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.012084 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.012096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.012113 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.012428 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.016689 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apis
erver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.029143 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.042529 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.057877 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.076571 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.090992 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af
53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.114900 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.114936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.114947 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.114963 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.114976 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.217137 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.217171 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.217182 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.217199 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.217209 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.320031 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.320074 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.320087 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.320103 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.320115 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.422401 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.422451 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.422462 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.422480 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.422493 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.499632 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-wkxgm"] Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.500476 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.500612 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.515873 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.524363 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.524394 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.524404 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.524419 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.524431 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.530528 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.547019 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.559006 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.573355 4702 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.584469 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.594836 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.600583 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghwsv\" (UniqueName: \"kubernetes.io/projected/c28d90e3-ab19-480f-989e-3e49d1289b7a-kube-api-access-ghwsv\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.600619 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: 
I1124 17:49:05.612398 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.627145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.627199 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.627211 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.627230 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.627243 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.628703 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.642671 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.647113 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.647133 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.647232 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.647373 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.647589 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.647720 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.655905 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.665297 
4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.675895 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.685902 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.696298 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.701834 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghwsv\" (UniqueName: \"kubernetes.io/projected/c28d90e3-ab19-480f-989e-3e49d1289b7a-kube-api-access-ghwsv\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.701887 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.702013 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.702079 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs 
podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:06.202060815 +0000 UTC m=+35.442801979 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.705321 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.716215 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghwsv\" (UniqueName: \"kubernetes.io/projected/c28d90e3-ab19-480f-989e-3e49d1289b7a-kube-api-access-ghwsv\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " 
pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.729148 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.729186 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.729194 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.729210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.729221 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.831550 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.831587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.831599 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.831615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.831627 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.883922 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" event={"ID":"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d","Type":"ContainerStarted","Data":"b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.883974 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" event={"ID":"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d","Type":"ContainerStarted","Data":"d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.886043 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/1.log" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.890824 4702 scope.go:117] "RemoveContainer" containerID="d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab" Nov 24 17:49:05 crc kubenswrapper[4702]: E1124 17:49:05.890952 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.898998 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.912196 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.925625 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.934636 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.934684 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.934695 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.934711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.934722 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:05Z","lastTransitionTime":"2025-11-24T17:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.943212 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7366e7e4fa7a9b57a68677524b4842d377c7957142c7b20b7dfbef93a7b764d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:01Z\\\",\\\"message\\\":\\\"licy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.560784 5972 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.560962 5972 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:49:01.561031 5972 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:49:01.561080 5972 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:49:01.561204 5972 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:49:01.561263 5972 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:01.561275 5972 factory.go:656] Stopping watch factory\\\\nI1124 17:49:01.561298 5972 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:49:01.561451 5972 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.954416 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.965057 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.977585 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af
53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:05 crc kubenswrapper[4702]: I1124 17:49:05.987819 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.000327 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.012778 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.022814 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.033106 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.036454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.036494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.036505 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.036522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.036531 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.042996 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.051920 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.061462 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.070723 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.083024 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},
\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.096334 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.108867 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.125909 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f0919
17c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.136010 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.138593 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.138656 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.138669 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.138689 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.138700 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.149301 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.164430 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.175246 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 
2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.187592 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.197636 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.207131 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:06 crc kubenswrapper[4702]: E1124 17:49:06.207290 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:06 crc kubenswrapper[4702]: E1124 17:49:06.207382 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:07.207364752 +0000 UTC m=+36.448105916 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.210746 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.220770 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.233553 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.240661 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.240702 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.240719 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.240739 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.240751 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.245557 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.255486 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.265608 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:06Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.343036 4702 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.343071 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.343081 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.343093 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.343102 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.445566 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.445648 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.445671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.445707 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.445732 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.548525 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.548571 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.548584 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.548603 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.548617 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.652279 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.652688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.652701 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.652721 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.652735 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.755204 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.755254 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.755267 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.755285 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.755299 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.858337 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.858373 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.858382 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.858398 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.858408 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.960382 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.960435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.960449 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.960470 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:06 crc kubenswrapper[4702]: I1124 17:49:06.960484 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:06Z","lastTransitionTime":"2025-11-24T17:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.062365 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.062401 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.062409 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.062424 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.062433 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.165130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.165166 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.165176 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.165194 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.165204 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.215404 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.215628 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.215731 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:09.2157084 +0000 UTC m=+38.456449644 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.267112 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.267183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.267196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.267214 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.267266 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.316848 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.317029 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:49:23.316997671 +0000 UTC m=+52.557738835 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.369436 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.369476 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.369488 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.369502 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.369511 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.418324 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.418393 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.418437 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.418502 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418548 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418587 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418621 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418658 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418667 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418697 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418716 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418672 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418640 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:23.418622331 +0000 UTC m=+52.659363495 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418784 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:23.418768615 +0000 UTC m=+52.659509799 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418831 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:23.418792146 +0000 UTC m=+52.659533320 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.418847 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:23.418839337 +0000 UTC m=+52.659580521 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.471993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.472043 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.472059 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.472079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.472093 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.575148 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.575206 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.575224 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.575252 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.575276 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.647288 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.647315 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.647289 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.647414 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.647461 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.647572 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.647664 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.647872 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.677971 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.678046 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.678064 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.678089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.678107 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.781193 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.781227 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.781236 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.781250 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.781258 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.803258 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.803298 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.803312 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.803331 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.803341 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.816736 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:07Z is after 2025-08-24T17:21:41Z"
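
Every node status patch in this capture is rejected by the same admission webhook: the serving certificate behind node.network-node-identity.openshift.io expired on 2025-08-24T17:21:41Z, roughly three months before the node's clock time of 2025-11-24T17:49:07Z. A quick way to confirm the expiry from the node is to pull the certificate off the listener named in the error and read its validity window. The sketch below is one way to do that in Python; it is not part of the capture, it assumes the webhook is still listening on 127.0.0.1:9743 as the log says, and it needs the third-party cryptography package.

import ssl
from cryptography import x509  # third-party: pip install cryptography

HOST, PORT = "127.0.0.1", 9743  # listener from the webhook error above

# Fetch the serving certificate WITHOUT verifying it; verification is exactly
# what fails here, so a verifying client would never get this far.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before)   # deprecated alias of not_valid_before_utc
print("notAfter: ", cert.not_valid_after)    # the log implies 2025-08-24 17:21:41

If notAfter is indeed in the past, the kubelet will keep failing these patches until the certificate is rotated; the retry entries that follow are the same failure recurring, not new faults.
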
2025-08-24T17:21:41Z" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.821329 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.821391 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.821400 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.821416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.821426 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.834350 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:07Z is after 
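
Independently of the webhook failure, the node reports NotReady because the runtime finds no CNI configuration under /etc/kubernetes/cni/net.d/. On a cluster like this one that file is normally written by the network provider once its pods come up, so an empty directory usually means the network operator has not started yet rather than a missing package. A small stdlib-only check of the directory the message names (the extensions are the ones CNI loaders pick up; adjust the path if your kubelet points elsewhere):

import glob
import os

CNI_DIR = "/etc/kubernetes/cni/net.d"  # directory named in the NetworkReady=false message
PATTERNS = ("*.conf", "*.conflist", "*.json")  # config file types CNI loaders consider

found = sorted(p for pat in PATTERNS for p in glob.glob(os.path.join(CNI_DIR, pat)))
if found:
    for path in found:
        print(path, os.path.getsize(path), "bytes")
else:
    print(f"no CNI configuration files under {CNI_DIR}; network plugin not ready")
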
2025-08-24T17:21:41Z" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.841993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.842076 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.842093 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.842118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.842136 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.855463 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:07Z is after 
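
The will-retry entries collapsed above are byte-identical apart from their microsecond timestamps, which is typical of kubelet retry loops and makes long captures like this one mostly repetition. When triaging such a log it is often quicker to bucket entries by source location and message prefix than to read it linearly. A rough sketch; the klog parsing, the 80-character bucket key, and reading the path from argv are arbitrary choices, and it assumes one entry per line, so wrapped captures undercount:

import collections
import re
import sys

# klog entries look like: E1124 17:49:07.834350 4702 kubelet_node_status.go:585] "msg..."
ENTRY = re.compile(r'([EIW])(\d{4} \d{2}:\d{2}:\d{2}\.\d+)\s+\d+\s+(\S+?)\]\s+(.*)')

counts = collections.Counter()
with open(sys.argv[1], errors="replace") as fh:
    for line in fh:
        m = ENTRY.search(line)  # first entry on the line; good enough for a summary
        if m:
            level, _mmdd_time, src, msg = m.groups()
            counts[(level, src, msg[:80])] += 1  # bucket on the message head

for (level, src, head), n in counts.most_common(10):
    print(f"{n:6d}  {level} {src}  {head}")

Run against this capture it would surface kubelet_node_status.go:585 and setters.go:603 as the dominant sources, matching the collapsed repetition above.
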
2025-08-24T17:21:41Z" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.859179 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.859220 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.859232 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.859251 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.859262 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.873111 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:07Z is after 
2025-08-24T17:21:41Z" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.876340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.876389 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.876407 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.876428 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.876445 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.889509 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:07Z is after 
2025-08-24T17:21:41Z" Nov 24 17:49:07 crc kubenswrapper[4702]: E1124 17:49:07.889960 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.891562 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.891687 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.891835 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.891936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.892016 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.994864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.994912 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.994927 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.994950 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:07 crc kubenswrapper[4702]: I1124 17:49:07.994968 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:07Z","lastTransitionTime":"2025-11-24T17:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.097767 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.097838 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.097848 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.097866 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.097877 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.200638 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.200688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.200702 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.200721 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.200734 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.303186 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.303225 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.303236 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.303252 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.303261 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.406502 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.406574 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.406587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.406608 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.406623 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.508694 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.508725 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.508733 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.508746 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.508755 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.611215 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.611248 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.611256 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.611269 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.611277 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.714085 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.714128 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.714137 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.714151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.714161 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.816435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.816472 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.816481 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.816497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.816507 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.919245 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.919306 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.919320 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.919342 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:08 crc kubenswrapper[4702]: I1124 17:49:08.919360 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:08Z","lastTransitionTime":"2025-11-24T17:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.021461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.021769 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.021910 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.022004 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.022067 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.124645 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.124675 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.124684 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.124697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.124705 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.226752 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.226829 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.226852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.226876 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.226892 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.235442 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.235614 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.235693 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:13.235670103 +0000 UTC m=+42.476411297 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.330037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.330086 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.330100 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.330119 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.330132 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.432613 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.432673 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.432690 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.432711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.432727 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.535204 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.535253 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.535262 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.535278 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.535288 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.637463 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.637509 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.637519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.637535 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.637547 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.648037 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.648068 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.648099 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.648034 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.648154 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.648228 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.648374 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:09 crc kubenswrapper[4702]: E1124 17:49:09.648557 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.742139 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.742215 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.742228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.742248 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.742265 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.845651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.845677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.845686 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.845698 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.845707 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.947754 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.947786 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.947821 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.947834 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:09 crc kubenswrapper[4702]: I1124 17:49:09.947844 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:09Z","lastTransitionTime":"2025-11-24T17:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.050438 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.050506 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.050523 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.050548 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.050566 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.153855 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.153942 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.153971 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.154001 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.154024 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.256599 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.256660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.256679 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.256703 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.256718 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.358725 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.358824 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.358843 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.358869 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.358888 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.461462 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.461541 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.461567 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.461601 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.461623 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.564691 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.564854 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.564867 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.564888 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.564903 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.667515 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.667569 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.667603 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.667629 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.667643 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.769965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.770011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.770025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.770043 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.770055 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.873507 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.873635 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.873665 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.873693 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.873713 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.976992 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.977030 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.977038 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.977056 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:10 crc kubenswrapper[4702]: I1124 17:49:10.977065 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:10Z","lastTransitionTime":"2025-11-24T17:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.079707 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.079751 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.079761 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.079784 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.079823 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.182029 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.182097 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.182108 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.182123 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.182133 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.284313 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.284351 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.284362 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.284377 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.284390 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.386940 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.387051 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.387066 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.387091 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.387106 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.491263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.491334 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.491346 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.491368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.491380 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594461 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594877 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594923 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594937 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.594973 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.609363 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.625425 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.641763 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.648016 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.648215 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.652816 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.656690 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:11 crc kubenswrapper[4702]: E1124 17:49:11.656697 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:49:11 crc kubenswrapper[4702]: E1124 17:49:11.656827 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:49:11 crc kubenswrapper[4702]: E1124 17:49:11.656922 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:49:11 crc kubenswrapper[4702]: E1124 17:49:11.656997 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.670720 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-
6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha25
6:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.688226 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.697140 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.697205 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.697218 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.697237 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.697250 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.701943 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.720713 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.733364 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 
2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.744760 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.757223 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.771389 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.784012 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 
17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.798781 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.800442 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.800491 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.800504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.800524 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.800537 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.812024 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.826659 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.838699 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.850049 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.864856 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.879938 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.895435 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.903238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.903272 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.903284 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.903305 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.903319 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:11Z","lastTransitionTime":"2025-11-24T17:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.911889 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.928027 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.938564 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.949273 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.959168 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.970359 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b827
99488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.984778 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:11 crc kubenswrapper[4702]: I1124 17:49:11.999484 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:11Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.006330 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.006399 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.006419 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.006451 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.006471 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.026164 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.042563 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea
3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.059761 4702 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.077759 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.109002 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.109056 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.109070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.109091 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.109105 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.211898 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.211947 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.211959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.211980 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.211995 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.315023 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.315078 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.315088 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.315103 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.315114 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.418596 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.418665 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.418690 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.418725 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.418751 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.522367 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.522418 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.522432 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.522450 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.522463 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.625271 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.625311 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.625322 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.625337 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.625348 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.728223 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.728291 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.728305 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.728323 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.728334 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.831340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.831409 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.831421 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.831445 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.831459 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.934196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.934270 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.934290 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.934317 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:12 crc kubenswrapper[4702]: I1124 17:49:12.934339 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:12Z","lastTransitionTime":"2025-11-24T17:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.036335 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.036363 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.036374 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.036387 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.036396 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.138721 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.138749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.138758 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.138771 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.138831 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.241700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.241737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.241747 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.241760 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.241769 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.278634 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.278784 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.278909 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:21.278888806 +0000 UTC m=+50.519629970 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.344213 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.344256 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.344266 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.344279 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.344290 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.447241 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.447299 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.447311 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.447328 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.447340 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.549973 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.550041 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.550055 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.550077 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.550091 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.647509 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.647600 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.647639 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.647791 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.647926 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.648006 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.648126 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:13 crc kubenswrapper[4702]: E1124 17:49:13.648284 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.651746 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.651849 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.651876 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.651909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.651927 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.754363 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.754403 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.754414 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.754427 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.754436 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.857396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.857437 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.857458 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.857478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.857489 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.961220 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.961307 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.961328 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.961360 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:13 crc kubenswrapper[4702]: I1124 17:49:13.961381 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:13Z","lastTransitionTime":"2025-11-24T17:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.065027 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.065116 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.065162 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.065204 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.065227 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.168733 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.168863 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.168902 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.168938 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.168962 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.271748 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.271810 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.271820 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.271840 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.271851 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.374915 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.374968 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.374976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.374992 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.375002 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.478158 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.478246 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.478271 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.478305 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.478331 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.581998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.582059 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.582075 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.582096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.582111 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.685027 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.685070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.685079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.685096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.685105 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.787611 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.787650 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.787663 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.787681 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.787692 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.890393 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.890462 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.890485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.890508 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.890542 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.993599 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.993649 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.993662 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.993680 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:14 crc kubenswrapper[4702]: I1124 17:49:14.993692 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:14Z","lastTransitionTime":"2025-11-24T17:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.096412 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.096446 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.096455 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.096471 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.096482 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.199012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.199054 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.199065 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.199080 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.199093 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.301476 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.301536 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.301554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.301579 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.301596 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.403586 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.403633 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.403645 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.403662 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.403673 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.506445 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.506501 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.506523 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.506545 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.506560 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.609018 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.609096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.609109 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.609129 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.609142 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.648010 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.648081 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.648126 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:15 crc kubenswrapper[4702]: E1124 17:49:15.648161 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.648206 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:15 crc kubenswrapper[4702]: E1124 17:49:15.648327 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:15 crc kubenswrapper[4702]: E1124 17:49:15.648519 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:15 crc kubenswrapper[4702]: E1124 17:49:15.648627 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.711524 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.711565 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.711573 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.711587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.711596 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.814518 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.814561 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.814587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.814604 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.814616 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.918060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.918121 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.918142 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.918163 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:15 crc kubenswrapper[4702]: I1124 17:49:15.918179 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:15Z","lastTransitionTime":"2025-11-24T17:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.021282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.021317 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.021326 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.021341 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.021352 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.123864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.123918 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.123987 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.124005 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.124019 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.226530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.226578 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.226589 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.226605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.226616 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.329571 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.329605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.329614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.329627 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.329636 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.433045 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.433092 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.433103 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.433124 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.433138 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.535467 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.535539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.535563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.535592 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.535615 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.638185 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.638218 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.638228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.638244 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.638254 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.740950 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.741015 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.741025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.741038 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.741046 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.843441 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.843479 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.843487 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.843503 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.843512 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.945434 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.945478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.945488 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.945504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:16 crc kubenswrapper[4702]: I1124 17:49:16.945516 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:16Z","lastTransitionTime":"2025-11-24T17:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.047552 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.047588 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.047598 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.047615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.047627 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.149202 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.149241 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.149250 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.149263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.149275 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.252443 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.252483 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.252495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.252510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.252520 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.354529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.354572 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.354582 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.354598 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.354607 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.458404 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.458491 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.458514 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.458529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.458538 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.560773 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.560854 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.560866 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.560886 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.560898 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.647279 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.647355 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.647391 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.647417 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:17 crc kubenswrapper[4702]: E1124 17:49:17.647441 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:17 crc kubenswrapper[4702]: E1124 17:49:17.647514 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:17 crc kubenswrapper[4702]: E1124 17:49:17.647594 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:17 crc kubenswrapper[4702]: E1124 17:49:17.647776 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.663037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.663062 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.663069 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.663082 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.663091 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.765404 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.765443 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.765454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.765468 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.765478 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.868203 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.868278 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.868301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.868327 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.868342 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.971465 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.971550 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.971563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.971589 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:17 crc kubenswrapper[4702]: I1124 17:49:17.971606 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:17Z","lastTransitionTime":"2025-11-24T17:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.073975 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.074063 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.074491 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.074581 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.075044 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.168357 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.168401 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.168414 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.168433 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.168448 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: E1124 17:49:18.181147 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.184828 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.184863 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.184874 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.184889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.184901 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.202346 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.202370 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.202377 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.202390 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.202398 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.218338 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.218370 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.218381 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.218396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.218410 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.231776 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.231913 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.232007 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.232095 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.232171 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: E1124 17:49:18.248056 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:18 crc kubenswrapper[4702]: E1124 17:49:18.248549 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.249929 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.249952 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.249965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.249981 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.249992 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.352963 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.353000 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.353011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.353026 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.353037 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.455392 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.455435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.455446 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.455463 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.455477 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.558124 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.558188 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.558201 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.558220 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.558232 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.647847 4702 scope.go:117] "RemoveContainer" containerID="d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.660492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.660522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.660530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.660543 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.660551 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.763015 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.763416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.763427 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.763440 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.763451 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.866055 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.866118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.866128 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.866143 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.866152 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.933953 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/1.log" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.936832 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.937233 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.950614 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.963275 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.968466 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.968519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.968529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.968546 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.968556 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:18Z","lastTransitionTime":"2025-11-24T17:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.975774 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:18 crc kubenswrapper[4702]: I1124 17:49:18.992158 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:18Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.007268 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.020286 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.031246 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.047778 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.062282 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.071066 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.071103 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.071113 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.071145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.071156 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.080689 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.100021 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.115367 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.136853 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.151635 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.170752 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.173449 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.173498 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.173517 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.173530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.173539 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.189557 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.276447 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.276494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.276504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.276551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.276563 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.378779 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.378831 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.378841 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.378856 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.378864 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.480590 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.480650 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.480661 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.480677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.480688 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.583292 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.583324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.583332 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.583344 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.583353 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.648146 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.648146 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.648486 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.648692 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:19 crc kubenswrapper[4702]: E1124 17:49:19.648699 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:19 crc kubenswrapper[4702]: E1124 17:49:19.648843 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:19 crc kubenswrapper[4702]: E1124 17:49:19.648887 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:19 crc kubenswrapper[4702]: E1124 17:49:19.649292 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.685594 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.685631 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.685640 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.685653 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.685664 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.788741 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.788780 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.788791 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.788827 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.788839 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.891370 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.891413 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.891423 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.891437 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.891447 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.941360 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/2.log" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.942102 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/1.log" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.944385 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" exitCode=1 Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.944429 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46"} Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.944471 4702 scope.go:117] "RemoveContainer" containerID="d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.945138 4702 scope.go:117] "RemoveContainer" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" Nov 24 17:49:19 crc kubenswrapper[4702]: E1124 17:49:19.945335 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.958548 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.969459 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.983000 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993758 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993814 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993823 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993775 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:19Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993836 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:19 crc kubenswrapper[4702]: I1124 17:49:19.993848 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:19Z","lastTransitionTime":"2025-11-24T17:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.003426 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.020299 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d9d14f6873696ac3866e8d2b418e8d31688f091917c6bc365c45a7d14aac28ab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"message\\\":\\\"nformers/factory.go:160\\\\nI1124 17:49:04.394471 6133 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.398743 6133 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:49:04.411080 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:49:04.411146 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 17:49:04.411733 6133 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:49:04.417408 6133 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:49:04.417582 6133 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:49:04.418457 6133 factory.go:656] Stopping watch factory\\\\nI1124 17:49:04.445401 6133 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 17:49:04.445445 6133 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 17:49:04.445518 6133 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:49:04.445542 6133 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 17:49:04.445667 6133 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical 
port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.032549 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.044254 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.061502 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.071226 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.081676 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.092614 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.096324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.096357 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.096365 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.096378 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.096387 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.113829 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.132156 4702 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.146123 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.155350 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.198316 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.198369 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.198383 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.198401 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.198412 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.300563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.300615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.300627 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.300644 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.300661 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.403045 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.403072 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.403081 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.403093 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.403102 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.505426 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.505469 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.505479 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.505495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.505505 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.608186 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.608227 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.608236 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.608256 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.608267 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.710681 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.710722 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.710737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.710759 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.710772 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.814151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.814208 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.814221 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.814238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.814251 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.916906 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.916947 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.916959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.916976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.916989 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:20Z","lastTransitionTime":"2025-11-24T17:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.948783 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/2.log" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.952255 4702 scope.go:117] "RemoveContainer" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" Nov 24 17:49:20 crc kubenswrapper[4702]: E1124 17:49:20.952439 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.966882 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.979198 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:20 crc kubenswrapper[4702]: I1124 17:49:20.992268 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af
53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:20Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.002158 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.013333 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.019696 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.019749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.019762 4702 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.019780 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.019792 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.025072 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.034656 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.047726 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.058208 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.069044 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.078886 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.088663 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.101090 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.113550 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.122282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.122324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.122336 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.122353 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.122366 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.125838 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.143579 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.224557 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.224901 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.224995 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.225095 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.225182 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.327502 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.327531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.327539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.327553 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.327563 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.358666 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.359002 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.359253 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:49:37.359223853 +0000 UTC m=+66.599965087 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.429496 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.429535 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.429545 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.429559 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.429571 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.531930 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.531996 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.532013 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.532037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.532056 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.634453 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.634495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.634506 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.634522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.634533 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.647756 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.647778 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.647828 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.647764 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.647900 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.647984 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.648048 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:21 crc kubenswrapper[4702]: E1124 17:49:21.648108 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.664149 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.676433 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.696615 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af
53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.715269 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.734292 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.736916 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.736954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.736965 4702 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.736982 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.736993 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.748516 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.762717 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.776224 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.781154 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha25
6:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.792702 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.797868 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.808435 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.819943 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.829910 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.839338 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.839388 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.839399 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.839417 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.839429 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.844241 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.
io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.857155 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.870650 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.889313 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status:
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef
1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.900599 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.911657 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.921613 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mo
untPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.933210 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.941244 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.941282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.941292 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.941310 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.941322 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:21Z","lastTransitionTime":"2025-11-24T17:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.945104 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.956930 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status:
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.973149 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status:
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef
1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.985176 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:21 crc kubenswrapper[4702]: I1124 17:49:21.996444 4702 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.010213 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.021780 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.033073 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.043653 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.043697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.043714 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.043735 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.043746 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.045865 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.057218 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.067567 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.076734 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.087602 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.145665 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.145714 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.145724 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.145741 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.145754 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.248072 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.248120 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.248130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.248147 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.248156 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.350777 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.350898 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.350936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.350967 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.350989 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.455014 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.455111 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.455126 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.455144 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.455164 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.558032 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.558068 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.558076 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.558091 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.558100 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.660770 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.660826 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.660840 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.660857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.660869 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.762751 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.762840 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.762857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.762875 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.762895 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.864997 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.865046 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.865058 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.865075 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.865087 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.967701 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.967775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.967787 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.967835 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:22 crc kubenswrapper[4702]: I1124 17:49:22.967850 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:22Z","lastTransitionTime":"2025-11-24T17:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.070357 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.070849 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.070866 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.070885 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.070899 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.172856 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.172909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.172919 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.172934 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.172944 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.274962 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.274987 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.274996 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.275012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.275021 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.377290 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.377345 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.377360 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.377379 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.377397 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.378113 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.378296 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:49:55.37827355 +0000 UTC m=+84.619014724 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.479129 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.479208 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.479249 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.479278 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479280 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479311 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 
17:49:23.479331 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479354 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479368 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479404 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:55.479380954 +0000 UTC m=+84.720122148 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479434 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:55.479420626 +0000 UTC m=+84.720161890 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479453 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:55.479443367 +0000 UTC m=+84.720184631 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479561 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479577 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479587 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.479634 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:49:55.479623012 +0000 UTC m=+84.720364176 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.480450 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.480481 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.480492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.480507 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.480524 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.582994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.583908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.584069 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.584216 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.584344 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.647709 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.647756 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.647842 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.647847 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.648606 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.648657 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.648427 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:23 crc kubenswrapper[4702]: E1124 17:49:23.648691 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.687369 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.687410 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.687425 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.687441 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.687451 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.790255 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.790293 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.790303 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.790317 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.790330 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.892776 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.892847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.892868 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.892889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.892901 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.995630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.995697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.995711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.995728 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:23 crc kubenswrapper[4702]: I1124 17:49:23.995742 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:23Z","lastTransitionTime":"2025-11-24T17:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.097985 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.098027 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.098044 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.098060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.098070 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.200152 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.200196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.200210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.200227 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.200241 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.302550 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.302588 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.302598 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.302616 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.302625 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.404973 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.405003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.405011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.405026 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.405035 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.507117 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.507155 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.507164 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.507180 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.507189 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.609595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.609627 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.609635 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.609649 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.609660 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.711864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.711959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.711969 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.711984 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.711993 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.814355 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.814395 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.814405 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.814420 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.814429 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.916749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.916789 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.916816 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.916831 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:24 crc kubenswrapper[4702]: I1124 17:49:24.916840 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:24Z","lastTransitionTime":"2025-11-24T17:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.019660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.019716 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.019727 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.019740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.019749 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.122224 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.122259 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.122268 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.122281 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.122290 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.224609 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.224646 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.224656 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.224671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.224682 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.326662 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.326723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.326737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.326757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.326771 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.429202 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.429237 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.429247 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.429261 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.429272 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.531850 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.531924 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.531937 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.531966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.531989 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.634633 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.634680 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.634688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.634704 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.634713 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.647980 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.648038 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.648005 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:25 crc kubenswrapper[4702]: E1124 17:49:25.648113 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.647982 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:25 crc kubenswrapper[4702]: E1124 17:49:25.648287 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:25 crc kubenswrapper[4702]: E1124 17:49:25.648414 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:25 crc kubenswrapper[4702]: E1124 17:49:25.648521 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.736998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.737036 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.737045 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.737059 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.737068 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.839552 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.839654 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.839671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.839692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.839702 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.942103 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.942145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.942155 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.942175 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:25 crc kubenswrapper[4702]: I1124 17:49:25.942187 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:25Z","lastTransitionTime":"2025-11-24T17:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.045077 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.045130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.045144 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.045161 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.045175 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.147703 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.147740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.147749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.147782 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.147792 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.249939 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.249981 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.249992 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.250012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.250023 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.353095 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.353158 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.353172 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.353187 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.353197 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.455525 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.455590 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.455604 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.455621 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.455631 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.558023 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.558060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.558068 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.558083 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.558091 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.660729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.660831 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.660845 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.660872 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.660886 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.763190 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.763236 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.763247 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.763265 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.763277 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.866088 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.866144 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.866156 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.866172 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.866181 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.968651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.968686 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.968694 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.968706 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:26 crc kubenswrapper[4702]: I1124 17:49:26.968714 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:26Z","lastTransitionTime":"2025-11-24T17:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.071015 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.071124 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.071138 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.071162 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.071173 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.174203 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.174260 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.174277 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.174300 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.174317 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.277107 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.277145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.277154 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.277167 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.277175 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.381100 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.381172 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.381184 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.381201 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.381213 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.484410 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.484471 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.484489 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.484510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.484523 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.587711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.587752 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.587761 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.587775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.587784 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.647876 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.647939 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.647904 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.647885 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:27 crc kubenswrapper[4702]: E1124 17:49:27.648039 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:27 crc kubenswrapper[4702]: E1124 17:49:27.648179 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:27 crc kubenswrapper[4702]: E1124 17:49:27.648409 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:27 crc kubenswrapper[4702]: E1124 17:49:27.648577 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.690935 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.690999 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.691012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.691032 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.691045 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.793834 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.793889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.793902 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.793924 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.793939 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.897471 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.897519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.897531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.897548 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:27 crc kubenswrapper[4702]: I1124 17:49:27.897557 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:27Z","lastTransitionTime":"2025-11-24T17:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.000098 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.000150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.000159 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.000176 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.000185 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.102485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.102523 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.102534 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.102548 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.102558 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.205624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.205674 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.205687 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.205705 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.205718 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.308481 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.308530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.308539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.308554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.308563 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.411263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.411349 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.411365 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.411391 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.411411 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.513625 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.513677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.513688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.513704 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.513716 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.616100 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.616140 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.616154 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.616170 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.616179 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.624738 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.624781 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.624891 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.624910 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.624924 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.636889 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:28Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.641322 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.641368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.641377 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.641391 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.641400 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.652982 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:28Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.656138 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.656180 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.656192 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.656205 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.656214 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.666638 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:28Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.669944 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.669991 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.670003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.670022 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.670034 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.681818 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:28Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.685432 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.685464 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.685475 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.685491 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.685505 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.700253 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:28Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:28 crc kubenswrapper[4702]: E1124 17:49:28.700416 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.719083 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.719125 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.719136 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.719151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.719159 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.821793 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.821848 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.821857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.821871 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.821882 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.924630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.924679 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.924690 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.924711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:28 crc kubenswrapper[4702]: I1124 17:49:28.924723 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:28Z","lastTransitionTime":"2025-11-24T17:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.027428 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.027478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.027494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.027511 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.027523 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.130139 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.130168 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.130176 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.130189 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.130198 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.232284 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.232345 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.232359 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.232377 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.232850 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.335399 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.335443 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.335453 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.335470 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.335483 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.438377 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.438412 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.438422 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.438435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.438444 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.540490 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.540522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.540532 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.540545 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.540554 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.643156 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.643188 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.643196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.643210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.643219 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.647664 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.647687 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.647729 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:29 crc kubenswrapper[4702]: E1124 17:49:29.647769 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.647662 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:29 crc kubenswrapper[4702]: E1124 17:49:29.648094 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:29 crc kubenswrapper[4702]: E1124 17:49:29.648141 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:29 crc kubenswrapper[4702]: E1124 17:49:29.648175 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.745522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.745552 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.745560 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.745573 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.745584 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.847697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.847734 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.847745 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.847762 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.847774 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.949542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.949579 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.949588 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.949602 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:29 crc kubenswrapper[4702]: I1124 17:49:29.949610 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:29Z","lastTransitionTime":"2025-11-24T17:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.052133 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.052181 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.052193 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.052211 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.052225 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.154948 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.154984 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.154994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.155012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.155025 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.258010 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.258079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.258096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.258118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.258166 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.360615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.360692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.360700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.360713 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.360723 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.462412 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.462475 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.462487 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.462500 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.462508 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.566986 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.567071 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.567102 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.567148 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.567169 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.669564 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.669614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.669626 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.669643 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.669655 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.772035 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.772072 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.772083 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.772099 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.772110 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.874736 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.874775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.874784 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.874813 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.874829 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.977183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.977228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.977238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.977253 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:30 crc kubenswrapper[4702]: I1124 17:49:30.977262 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:30Z","lastTransitionTime":"2025-11-24T17:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.083267 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.083614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.083627 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.083646 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.083658 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.186217 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.186268 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.186280 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.186299 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.186312 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.288967 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.289012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.289023 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.289039 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.289051 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.391587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.391624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.391635 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.391651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.391663 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.493416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.493452 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.493460 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.493473 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.493482 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.595772 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.595827 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.595845 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.595859 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.595867 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.647108 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.647160 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.647173 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:31 crc kubenswrapper[4702]: E1124 17:49:31.647691 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.647877 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:31 crc kubenswrapper[4702]: E1124 17:49:31.647962 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:49:31 crc kubenswrapper[4702]: E1124 17:49:31.648040 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:49:31 crc kubenswrapper[4702]: E1124 17:49:31.648228 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.660850 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.673881 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.684425 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.694314 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.697700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.697729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.697741 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.697757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.697768 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.705373 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.720364 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.732933 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.744224 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.759764 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.773058 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.787691 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.800772 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.800829 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.800840 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.800857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.800872 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.802562 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.812693 4702 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.824938 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.837288 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.850781 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.869243 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:31Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.904055 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.904105 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.904117 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.904133 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:31 crc kubenswrapper[4702]: I1124 17:49:31.904143 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:31Z","lastTransitionTime":"2025-11-24T17:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.006447 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.006499 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.006510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.006528 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.006540 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.109763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.109841 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.109852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.109873 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.109884 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.213833 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.214130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.214265 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.214369 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.214459 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.317295 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.317329 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.317340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.317359 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.317369 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.420119 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.420154 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.420168 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.420182 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.420190 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.522388 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.522673 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.522745 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.522827 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.522894 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.625624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.626260 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.626351 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.626485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.626581 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.728638 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.728692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.728704 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.728721 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.728733 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.831295 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.831324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.831334 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.831349 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.831360 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.934135 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.934176 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.934185 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.934199 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:32 crc kubenswrapper[4702]: I1124 17:49:32.934208 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:32Z","lastTransitionTime":"2025-11-24T17:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.036740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.036780 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.036790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.036821 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.036830 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.139765 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.139841 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.139852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.139871 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.139881 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.242166 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.242212 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.242229 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.242247 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.242257 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.344879 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.344924 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.344937 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.344952 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.344965 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.447326 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.447374 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.447382 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.447396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.447406 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.549945 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.549987 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.549998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.550011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.550020 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.647195 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.647223 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.647229 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:33 crc kubenswrapper[4702]: E1124 17:49:33.647320 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.647357 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:33 crc kubenswrapper[4702]: E1124 17:49:33.647547 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:33 crc kubenswrapper[4702]: E1124 17:49:33.647541 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:33 crc kubenswrapper[4702]: E1124 17:49:33.647615 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.651868 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.651899 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.651909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.651925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.651935 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.755218 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.755975 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.756038 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.756070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.756087 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.859196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.859245 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.859301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.859323 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.859336 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.961856 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.961927 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.961944 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.961970 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:33 crc kubenswrapper[4702]: I1124 17:49:33.961985 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:33Z","lastTransitionTime":"2025-11-24T17:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.064178 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.064243 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.064255 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.064274 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.064288 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.166444 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.166522 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.166531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.166546 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.166556 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.270497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.270552 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.270565 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.270582 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.270595 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.373397 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.373461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.373471 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.373487 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.373497 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.476479 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.476564 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.476584 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.476605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.476616 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.579008 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.579062 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.579074 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.579088 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.579097 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.682052 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.682102 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.682113 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.682128 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.682139 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.784110 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.784172 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.784184 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.784199 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.784211 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.886713 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.886760 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.886768 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.886783 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.886816 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.988427 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.988468 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.988477 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.988492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:34 crc kubenswrapper[4702]: I1124 17:49:34.988501 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:34Z","lastTransitionTime":"2025-11-24T17:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.090825 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.090856 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.090864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.090878 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.090886 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.193214 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.193250 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.193260 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.193273 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.193281 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.295844 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.295918 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.295930 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.295946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.295956 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.398151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.398205 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.398214 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.398228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.398237 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.500366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.500424 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.500433 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.500450 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.500459 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.602616 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.602651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.602659 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.602677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.602687 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.647630 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:35 crc kubenswrapper[4702]: E1124 17:49:35.647895 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.647987 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.648030 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.647980 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:35 crc kubenswrapper[4702]: E1124 17:49:35.648151 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:49:35 crc kubenswrapper[4702]: E1124 17:49:35.648237 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a"
Nov 24 17:49:35 crc kubenswrapper[4702]: E1124 17:49:35.648367 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.705057 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.705101 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.705111 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.705130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.705140 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.808430 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.808469 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.808480 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.808495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.808506 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.910618 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.910657 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.910666 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.910683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:35 crc kubenswrapper[4702]: I1124 17:49:35.910696 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:35Z","lastTransitionTime":"2025-11-24T17:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.013240 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.013273 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.013284 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.013301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.013314 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.115722 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.115757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.115766 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.115781 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.115817 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.239008 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.239052 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.239062 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.239079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.239094 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.342119 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.342168 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.342183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.342199 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.342212 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.445022 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.445057 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.445065 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.445079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.445088 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.547257 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.547309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.547323 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.547341 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.547350 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.648476 4702 scope.go:117] "RemoveContainer" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" Nov 24 17:49:36 crc kubenswrapper[4702]: E1124 17:49:36.648823 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.649248 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.649276 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.649284 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.649297 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.649307 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.752387 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.752430 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.752440 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.752460 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.752472 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.855728 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.855774 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.855785 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.855816 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.855829 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.958395 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.958443 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.958458 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.958475 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:36 crc kubenswrapper[4702]: I1124 17:49:36.958487 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:36Z","lastTransitionTime":"2025-11-24T17:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.061145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.061324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.061378 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.061396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.061406 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.164166 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.164211 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.164222 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.164238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.164248 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.266424 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.266461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.266472 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.266492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.266503 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.368729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.368784 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.368809 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.368829 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.368840 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.417758 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.417905 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.417975 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:50:09.417958556 +0000 UTC m=+98.658699730 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.471457 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.471519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.471531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.471547 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.471558 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.573656 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.573687 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.573695 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.573708 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.573716 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.647508 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.647551 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.647515 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.647515 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.647662 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.647740 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.647827 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:37 crc kubenswrapper[4702]: E1124 17:49:37.648018 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.675561 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.675603 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.675613 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.675630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.675641 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.777911 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.777949 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.777959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.777978 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.777988 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.879923 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.879980 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.879996 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.880020 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.880033 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.982559 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.982597 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.982606 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.982618 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:37 crc kubenswrapper[4702]: I1124 17:49:37.982627 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:37Z","lastTransitionTime":"2025-11-24T17:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.085490 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.085539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.085551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.085571 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.085581 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.187540 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.187587 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.187597 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.187618 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.187630 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.289849 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.289901 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.289915 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.289939 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.289951 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.392306 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.392353 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.392363 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.392379 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.392390 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.493980 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.494014 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.494022 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.494036 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.494045 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.596683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.596732 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.596743 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.596758 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.596768 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.698891 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.698929 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.698940 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.698957 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.698968 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.800717 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.800778 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.800790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.800826 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.800838 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.903327 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.903386 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.903415 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.903431 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:38 crc kubenswrapper[4702]: I1124 17:49:38.903441 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:38Z","lastTransitionTime":"2025-11-24T17:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.004892 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.004930 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.004938 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.004952 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.004962 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.086243 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.086302 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.086312 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.086327 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.086342 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.100502 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:39Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.104505 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.104557 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.104574 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.104595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.104611 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.118480 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:39Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.122859 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.122894 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.122903 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.122917 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.122926 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.133931 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:39Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.137476 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.137515 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.137551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.137567 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.137577 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.148473 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:39Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.151763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.151812 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.151826 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.151842 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.151853 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.162536 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:39Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.162644 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.163846 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.163878 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.163891 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.163908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.163916 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.266106 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.266154 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.266168 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.266183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.266192 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.368433 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.368478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.368492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.368550 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.368564 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.472376 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.472426 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.472437 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.472454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.472465 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.574409 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.574452 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.574460 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.574474 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.574484 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.647478 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.647527 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.647536 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.647500 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.647616 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.647679 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.647766 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a"
Nov 24 17:49:39 crc kubenswrapper[4702]: E1124 17:49:39.647849 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.676298 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.676330 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.676340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.676352 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.676362 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.778667 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.778716 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.778727 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.778747 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.778759 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.881692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.881728 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.881742 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.881759 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.881770 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.983942 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.983992 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.984009 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.984026 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:39 crc kubenswrapper[4702]: I1124 17:49:39.984038 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:39Z","lastTransitionTime":"2025-11-24T17:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.003452 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/0.log"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.003500 4702 generic.go:334] "Generic (PLEG): container finished" podID="f4859751-212a-4d94-b0c7-875b1da99cd8" containerID="87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1" exitCode=1
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.003533 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerDied","Data":"87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1"}
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.003906 4702 scope.go:117] "RemoveContainer" containerID="87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.020359 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.032486 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.046851 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.057576 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.071355 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.083096 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.085963 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.085990 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.085998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.086012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.086021 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.094296 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.105638 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",
\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.118179 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\"
:true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.135436 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",
\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.151471 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.163946 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.182534 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef
1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.188292 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.188340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.188353 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.188510 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.188529 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.196645 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.212155 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.230604 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.243201 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:40Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.290959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.291001 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.291013 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.291028 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.291039 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.393634 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.393672 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.393683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.393698 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.393707 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.496148 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.496188 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.496201 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.496217 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.496228 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.599069 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.599108 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.599118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.599134 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.599145 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.700854 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.700893 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.700906 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.700921 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.700933 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.803737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.803777 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.803787 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.803827 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.803839 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.906863 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.906960 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.906994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.907967 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:40 crc kubenswrapper[4702]: I1124 17:49:40.907981 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:40Z","lastTransitionTime":"2025-11-24T17:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009291 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/0.log" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009400 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerStarted","Data":"d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009505 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009568 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.009583 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.020089 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.030772 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.041553 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.049591 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.059839 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.074752 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb18
9872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.087666 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.099210 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.112173 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.112200 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.112207 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.112221 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.112231 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.120360 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.134259 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.147122 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.161642 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.172654 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.187938 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.200478 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.214192 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.214263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.214280 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.214327 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.214341 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.215179 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.228760 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.317215 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.317245 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.317257 4702 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.317271 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.317280 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.419887 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.419950 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.419959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.419974 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.419985 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.523000 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.523037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.523054 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.523070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.523079 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.625775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.625831 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.625844 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.625859 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.625868 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.647386 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.647450 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.647407 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.647406 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:41 crc kubenswrapper[4702]: E1124 17:49:41.647529 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:41 crc kubenswrapper[4702]: E1124 17:49:41.647660 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:41 crc kubenswrapper[4702]: E1124 17:49:41.647706 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:41 crc kubenswrapper[4702]: E1124 17:49:41.647743 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.661577 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac
0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.673417 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.684157 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.693419 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.705231 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.722983 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.727724 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.727763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.727776 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.727810 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.727823 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.737657 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.751068 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.769270 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef
1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.783958 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.799004 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.816633 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.827131 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.830321 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.830359 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.830370 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.830386 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.830397 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.840140 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.851009 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.861751 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.872934 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:41Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.932976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.933017 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.933033 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.933054 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:41 crc kubenswrapper[4702]: I1124 17:49:41.933066 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:41Z","lastTransitionTime":"2025-11-24T17:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.035751 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.035783 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.035790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.035819 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.035830 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.138542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.138579 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.138589 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.138605 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.138617 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.240989 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.241041 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.241054 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.241076 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.241087 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.343290 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.343333 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.343345 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.343362 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.343373 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.445951 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.446014 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.446029 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.446046 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.446059 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.548408 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.548447 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.548457 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.548472 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.548484 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.650829 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.650868 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.650878 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.650889 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.650898 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.752884 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.752931 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.752940 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.752956 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.752966 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.855490 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.855530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.855539 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.855554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.855565 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.957383 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.957434 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.957442 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.957458 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:42 crc kubenswrapper[4702]: I1124 17:49:42.957467 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:42Z","lastTransitionTime":"2025-11-24T17:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.059604 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.059642 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.059650 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.059664 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.059673 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.162441 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.162493 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.162504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.162521 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.162532 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.264774 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.264830 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.264842 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.264858 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.264869 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.367687 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.367729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.367740 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.367757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.367769 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.470506 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.470554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.470563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.470577 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.470587 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.573192 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.573252 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.573264 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.573282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.573298 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.647693 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:43 crc kubenswrapper[4702]: E1124 17:49:43.647953 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.648001 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.648085 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.648125 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:43 crc kubenswrapper[4702]: E1124 17:49:43.648366 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:43 crc kubenswrapper[4702]: E1124 17:49:43.648472 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:43 crc kubenswrapper[4702]: E1124 17:49:43.648560 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.675629 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.675673 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.675683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.675699 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.675710 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.778261 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.778289 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.778299 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.778313 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.778323 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.881070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.881111 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.881120 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.881136 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.881145 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.984040 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.984089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.984106 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.984125 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:43 crc kubenswrapper[4702]: I1124 17:49:43.984135 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:43Z","lastTransitionTime":"2025-11-24T17:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.085983 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.086011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.086019 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.086031 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.086039 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.188558 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.188627 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.188639 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.188655 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.188668 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.290913 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.290966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.290976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.290991 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.291001 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.393907 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.393951 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.393976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.393993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.394002 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.496651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.496713 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.496723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.496739 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.496775 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.599084 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.599139 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.599150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.599167 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.599178 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.659461 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.701511 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.701589 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.701598 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.701610 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.701619 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.803756 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.803791 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.803815 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.803834 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.803843 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.906696 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.906739 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.906752 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.906769 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:44 crc kubenswrapper[4702]: I1124 17:49:44.906781 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:44Z","lastTransitionTime":"2025-11-24T17:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.009527 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.009576 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.009590 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.009607 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.009619 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.111857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.111898 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.111937 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.111954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.111963 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.214635 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.214682 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.214692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.214710 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.214721 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.317422 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.317465 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.317479 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.317497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.317511 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.420160 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.420241 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.420263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.420294 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.420314 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.522553 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.522591 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.522600 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.522613 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.522622 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.625737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.625863 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.625882 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.625906 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.625924 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.648199 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.648281 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.648317 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:45 crc kubenswrapper[4702]: E1124 17:49:45.648442 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.648499 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:45 crc kubenswrapper[4702]: E1124 17:49:45.648604 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:45 crc kubenswrapper[4702]: E1124 17:49:45.648719 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:45 crc kubenswrapper[4702]: E1124 17:49:45.648774 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.728580 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.728620 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.728632 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.728648 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.728658 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.830690 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.830727 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.830737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.830751 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.830762 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.933853 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.933895 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.933903 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.933918 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:45 crc kubenswrapper[4702]: I1124 17:49:45.933927 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:45Z","lastTransitionTime":"2025-11-24T17:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.036454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.036501 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.036512 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.036526 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.036535 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.138554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.138601 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.138612 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.138630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.138641 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.240870 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.240909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.240920 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.240935 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.240946 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.343229 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.343274 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.343285 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.343301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.343313 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.445650 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.445688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.445700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.445716 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.445728 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.547700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.547747 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.547763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.547778 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.547787 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.650604 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.650694 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.650704 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.650729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.650741 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.754388 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.754454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.754466 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.754492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.754507 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.857332 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.857374 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.857383 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.857399 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.857410 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.960057 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.960109 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.960120 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.960136 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:46 crc kubenswrapper[4702]: I1124 17:49:46.960149 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:46Z","lastTransitionTime":"2025-11-24T17:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.062424 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.062497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.062518 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.062540 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.062557 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.165494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.165551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.165565 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.165590 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.165605 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.268883 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.268939 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.268952 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.268974 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.268989 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.371631 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.371695 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.371709 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.371728 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.371740 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.474431 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.474494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.474506 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.474519 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.474528 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.577686 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.577745 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.577755 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.577775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.577789 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.647479 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.647606 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.647663 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:47 crc kubenswrapper[4702]: E1124 17:49:47.647685 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.647752 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:47 crc kubenswrapper[4702]: E1124 17:49:47.647925 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:47 crc kubenswrapper[4702]: E1124 17:49:47.648026 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:47 crc kubenswrapper[4702]: E1124 17:49:47.648096 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.681092 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.681169 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.681195 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.681230 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.681257 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.786549 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.786607 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.786619 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.786639 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.786655 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.891036 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.891099 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.891114 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.891140 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.891157 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.993416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.993445 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.993453 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.993466 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:47 crc kubenswrapper[4702]: I1124 17:49:47.993474 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:47Z","lastTransitionTime":"2025-11-24T17:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.097039 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.097121 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.097141 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.097173 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.097199 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.200291 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.200366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.200385 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.200414 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.200434 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.304534 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.304620 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.304643 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.304677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.304701 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.407573 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.407606 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.407615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.407629 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.407638 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.510761 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.510846 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.510855 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.510870 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.510905 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.613857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.613916 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.613925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.613966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.613977 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.717378 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.717495 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.717523 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.717562 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.717588 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.820291 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.820390 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.820433 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.820484 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.820512 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.924184 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.924240 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.924251 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.924275 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:48 crc kubenswrapper[4702]: I1124 17:49:48.924288 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:48Z","lastTransitionTime":"2025-11-24T17:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.027763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.027847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.027864 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.027888 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.027905 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.131004 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.131080 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.131095 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.131123 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.131138 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.195090 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.195177 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.195194 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.195221 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.195241 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.218881 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
[... remainder of node status patch payload elided; the same multi-KB payload is resent verbatim on every retry below ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:49Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.225180 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.225389 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.225478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.225606 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.225714 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.240989 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... identical node status patch payload elided ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:49Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.246335 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.246474 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
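The repeated "Error updating node status" failures above and below share one root cause: the kubelet's TLS handshake rejects the network-node-identity webhook certificate because the node clock, 2025-11-24T17:49:49Z, is past the certificate's notAfter of 2025-08-24T17:21:41Z. A minimal Go sketch for confirming this from the node itself, assuming the webhook is reachable only locally at 127.0.0.1:9743 as in the Post URL above (standard library only; not part of any cluster tooling):

    // inspectcert.go: dial the webhook with verification disabled and print each
    // peer certificate's validity window. The kubelet's handshake fails exactly
    // because the current time falls outside this window.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // inspect only; do not trust the chain
        })
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
                cert.Subject,
                cert.NotBefore.Format(time.RFC3339),
                cert.NotAfter.Format(time.RFC3339),
                time.Now().After(cert.NotAfter))
        }
    }

On a CRC guest resumed long after its certificates were minted, the usual remedy is to let the cluster-managed rotation re-issue them (or recreate the instance); the sketch only confirms the diagnosis.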
event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.246563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.246660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.246689 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.263023 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.268998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.269049 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.269064 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.269082 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.269094 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.283759 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.287993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.288030 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.288043 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.288060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.288073 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.301704 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.301894 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.304169 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
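The "update node status exceeds retry count" error above follows exactly five consecutive patch failures, matching the kubelet's fixed retry bound for node status updates (the nodeStatusUpdateRetry constant, 5, in the kubelet source). A sketch of the pattern, with illustrative names rather than the actual kubelet code:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    // nodeStatusUpdateRetry mirrors the kubelet's bound; it explains why exactly
    // five "will retry" records precede the final error above.
    const nodeStatusUpdateRetry = 5

    func updateNodeStatus(patch func() error) error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            if err := patch(); err != nil {
                log.Printf("Error updating node status, will retry: %v", err)
                continue
            }
            return nil
        }
        return fmt.Errorf("update node status exceeds retry count")
    }

    func main() {
        // Simulate a webhook that rejects every patch, as the expired cert does here.
        err := updateNodeStatus(func() error {
            return errors.New("x509: certificate has expired or is not yet valid")
        })
        fmt.Println(err)
    }

The kubelet simply tries again on its next sync tick, which is why the same burst pattern recurs throughout this log.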
event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.304228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.304237 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.304276 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.304310 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.406644 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.406699 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.406707 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.406723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.406738 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.514031 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.514092 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.514104 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.514126 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.514142 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.616904 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.616955 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.616966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.616984 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.616996 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.647673 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.647751 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.647858 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.647916 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.647928 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.648030 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.648099 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
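Each "Error syncing pod, skipping" above is downstream of the same gate reported in every "Node became not ready" record: the container runtime answers the kubelet's status query with NetworkReady=false as long as /etc/kubernetes/cni/net.d/ holds no CNI configuration, and the kubelet will not create sandboxes for pods that need CNI networking until that condition flips. On this cluster the configuration is normally written by the network provider (OVN-Kubernetes) once it starts, which is itself blocked here by the expired webhook certificate. A rough Go sketch of such a readiness gate, illustrative rather than the actual CRI-O logic:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // networkReady reports the network as ready only once at least one CNI
    // configuration file is present in the conf directory; otherwise it returns
    // the message seen throughout this log.
    func networkReady(confDir string) (bool, string) {
        entries, _ := os.ReadDir(confDir)
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return true, ""
            }
        }
        return false, "no CNI configuration file in " + confDir + ". Has your network provider started?"
    }

    func main() {
        ready, msg := networkReady("/etc/kubernetes/cni/net.d")
        fmt.Println(ready, msg)
    }

Until the network provider writes its configuration, every pod that needs a sandbox network stays in this skipped state, as the repeated records below show.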
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:49 crc kubenswrapper[4702]: E1124 17:49:49.648282 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.720067 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.720150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.720174 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.720208 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.720228 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.823426 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.823470 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.823482 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.823502 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.823515 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.926282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.926342 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.926355 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.926376 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:49 crc kubenswrapper[4702]: I1124 17:49:49.926391 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:49Z","lastTransitionTime":"2025-11-24T17:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.028896 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.028927 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.028936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.028952 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.028962 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.131163 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.131205 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.131222 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.131238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.131249 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.235030 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.235069 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.235078 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.235093 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.235102 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.337905 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.337946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.337954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.337969 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.337978 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.440120 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.440150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.440159 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.440172 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.440182 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.543408 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.543451 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.543461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.543478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.543488 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.645789 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.645848 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.645857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.645872 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.645881 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.748253 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.748332 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.748362 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.748384 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.748428 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.851303 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.851358 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.851368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.851384 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.851394 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.954279 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.954316 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.954324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.954340 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:50 crc kubenswrapper[4702]: I1124 17:49:50.954365 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:50Z","lastTransitionTime":"2025-11-24T17:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.057089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.057127 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.057163 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.057182 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.057194 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.159582 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.159632 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.159642 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.159660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.159670 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.263756 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.263851 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.263871 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.263895 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.263943 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.366829 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.366858 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.366867 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.366880 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.366890 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.469157 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.469194 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.469205 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.469223 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.469235 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.570935 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.570974 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.570982 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.570995 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.571004 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.647605 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.647688 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.647756 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:51 crc kubenswrapper[4702]: E1124 17:49:51.647762 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.647866 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:51 crc kubenswrapper[4702]: E1124 17:49:51.647892 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:51 crc kubenswrapper[4702]: E1124 17:49:51.647942 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.648592 4702 scope.go:117] "RemoveContainer" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" Nov 24 17:49:51 crc kubenswrapper[4702]: E1124 17:49:51.648594 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.669818 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.673836 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.673872 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.673883 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.673896 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.673907 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.684546 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.697388 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.719602 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef
1e5dae7d9d9ecadcc774ae46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.732874 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.745269 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.763292 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.776047 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.776089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc 
kubenswrapper[4702]: I1124 17:49:51.776101 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.776118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.776132 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.778714 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.791557 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.805436 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8ba4510-20bc-4c65-a72d-6cdf0fd554aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6529a81232f3f57051a0982e935a142238a75bc8f2e555d58c7f1422e48c8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.820400 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T
17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.833643 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.844651 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.859073 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.872417 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.881807 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.881869 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.881881 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.881896 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.881907 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.889998 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.903721 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.916162 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.984839 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.984874 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.984882 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.984896 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:51 crc kubenswrapper[4702]: I1124 17:49:51.984906 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:51Z","lastTransitionTime":"2025-11-24T17:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.046190 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/2.log" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.048476 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.048947 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.063613 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.075349 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z"
Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.087558 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.087608 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.087619 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.087637 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.087648 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.092470 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.107115 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.121049 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.134286 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.143290 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.152938 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.160642 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8ba4510-20bc-4c65-a72d-6cdf0fd554aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6529a81232f3f57051a0982e935a142238a75bc8f2e555d58c7f1422e48c8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.170882 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.183612 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.189998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.190026 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.190033 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.190047 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.190056 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.196876 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.209479 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.222856 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 
17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.241986 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.255572 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.272731 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.284661 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.292736 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.292786 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.292819 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.292839 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.292854 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.395197 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.395242 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.395269 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.395286 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.395295 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.497704 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.497776 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.497791 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.497837 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.497847 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.600944 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.600977 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.600987 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.601001 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.601013 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.702923 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.702964 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.702976 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.702991 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.703002 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.806362 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.806431 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.806448 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.806470 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.806483 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.908999 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.909048 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.909060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.909081 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:52 crc kubenswrapper[4702]: I1124 17:49:52.909095 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:52Z","lastTransitionTime":"2025-11-24T17:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.011080 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.011118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.011128 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.011143 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.011153 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.053302 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/3.log" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.054046 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/2.log" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.056838 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" exitCode=1 Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.056890 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.056953 4702 scope.go:117] "RemoveContainer" containerID="7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.058245 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:49:53 crc kubenswrapper[4702]: E1124 17:49:53.058509 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.070917 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.094881 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60b6fc130b9dda618d40aa2089d5d79128c6db81
f1e4141e9444ea765076029c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7eac8fe3ec787fb872077f0bd2d375cd55be92ef1e5dae7d9d9ecadcc774ae46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:19Z\\\",\\\"message\\\":\\\"(s)\\\\nI1124 17:49:19.517272 6344 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1124 17:49:19.517282 6344 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nI1124 17:49:19.517295 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:19.517106 6344 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517325 6344 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-swrxh\\\\nI1124 17:49:19.517337 6344 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-swrxh in node crc\\\\nI1124 17:49:19.517349 6344 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-swrxh after 0 failed attempt(s)\\\\nF1124 17:49:19.517352 6344 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initiali\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:52Z\\\",\\\"message\\\":\\\" 6753 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-8g6cn\\\\nI1124 17:49:52.620010 6753 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:52.620238 6753 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-8g6cn in node crc\\\\nI1124 17:49:52.620243 6753 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-8g6cn after 0 failed attempt(s)\\\\nI1124 17:49:52.620248 6753 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-8g6cn\\\\nI1124 17:49:52.620099 6753 obj_retry.go:365] Adding new object: *v1.Pod openshift-image-registry/node-ca-xprzv\\\\nI1124 17:49:52.620263 6753 ovn.go:134] Ensuring zone local for Pod openshift-image-registry/node-ca-xprzv in node crc\\\\nI1124 17:49:52.620267 6753 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nF1124 17:49:52.620326 6753 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: 
unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"
cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.108262 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.112954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.112998 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.113007 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.113022 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.113032 4702 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.121598 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.135384 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.145374 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.156174 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.167137 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.181140 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.192273 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.201379 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.211729 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.214961 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.215012 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.215025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.215041 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.215400 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.221775 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8ba4510-20bc-4c65-a72d-6cdf0fd554aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6529a81232f3f57051a0982e935a142238a75bc8f2e555d58c7f1422e48c8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.230483 4702 
status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.239872 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.249844 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.265917 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.276684 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.317533 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.317578 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.317592 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.317609 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.317621 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.419595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.419636 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.419645 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.419659 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.419668 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.521857 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.521899 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.521909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.521922 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.521930 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.624478 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.624529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.624546 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.624564 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.624574 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.648002 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.648148 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.648149 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.648246 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:53 crc kubenswrapper[4702]: E1124 17:49:53.648324 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:53 crc kubenswrapper[4702]: E1124 17:49:53.648246 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:53 crc kubenswrapper[4702]: E1124 17:49:53.648405 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:53 crc kubenswrapper[4702]: E1124 17:49:53.648490 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.726965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.727011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.727024 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.727041 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.727051 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.829450 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.829484 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.829494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.829507 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.829515 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.932164 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.932200 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.932211 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.932268 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:53 crc kubenswrapper[4702]: I1124 17:49:53.932283 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:53Z","lastTransitionTime":"2025-11-24T17:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.034116 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.034158 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.034167 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.034180 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.034190 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.062174 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/3.log" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.067042 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:49:54 crc kubenswrapper[4702]: E1124 17:49:54.067380 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.079959 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d4133ba-866b-4adf-ae87-558bc9174d98\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bf6a419092b2014781f13df45a0f91faec522c243f7469b70e118fc40d0ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://922fd974d7134fca7f8f720e4bbe6c344d43a54d464ea3c0cf91225054f232f7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://658a5d0a5deaac0666e303b2470d03aab0ac8361adcec7947e6b16b0cc65a85c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://513fd37628d7a1788d5cd5693befa20786d9b0ddb189872e2c98bfe758cc718e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0cd762ff7693db46e42322e9ccced145da8869209f8f95258aa61a4e7f437fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 17:48:45.422407 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:48:45.423171 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2660826877/tls.crt::/tmp/serving-cert-2660826877/tls.key\\\\\\\"\\\\nI1124 17:48:51.114162 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:48:51.120598 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:48:51.120627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:48:51.120650 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:48:51.120657 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:48:51.126393 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 17:48:51.126422 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126426 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:48:51.126430 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:48:51.126434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:48:51.126438 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:48:51.126441 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 17:48:51.126492 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all 
endpoints registered and discovery information is complete\\\\nF1124 17:48:51.130239 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f57d8ac6ee7d118d4c2b665d944a7846a62d2595eebd43d669c19bab7d307551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c09845b60fd192b2508fdf503780a866f0ad24addc57ce6518d0602a763df4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.093240 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0d377ccbb9f52a2edb56c163d46188f998007daaf0f9d4e899800b90724a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.106710 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.125155 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4b86a8-9180-41ee-b240-0071bdc994da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60b6fc130b9dda618d40aa2089d5d79128c6db81
f1e4141e9444ea765076029c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:52Z\\\",\\\"message\\\":\\\" 6753 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-8g6cn\\\\nI1124 17:49:52.620010 6753 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1124 17:49:52.620238 6753 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-8g6cn in node crc\\\\nI1124 17:49:52.620243 6753 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-8g6cn after 0 failed attempt(s)\\\\nI1124 17:49:52.620248 6753 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-8g6cn\\\\nI1124 17:49:52.620099 6753 obj_retry.go:365] Adding new object: *v1.Pod openshift-image-registry/node-ca-xprzv\\\\nI1124 17:49:52.620263 6753 ovn.go:134] Ensuring zone local for Pod openshift-image-registry/node-ca-xprzv in node crc\\\\nI1124 17:49:52.620267 6753 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nF1124 17:49:52.620326 6753 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:49:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6wkcd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-f5g6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136407 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136431 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136446 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.136873 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.148467 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8g6cn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4859751-212a-4d94-b0c7-875b1da99cd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:49:39Z\\\",\\\"message\\\":\\\"2025-11-24T17:48:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf\\\\n2025-11-24T17:48:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ae970b30-d96a-4a5e-b9a5-f28fb72e3ebf to /host/opt/cni/bin/\\\\n2025-11-24T17:48:54Z [verbose] multus-daemon started\\\\n2025-11-24T17:48:54Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:49:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f4rff\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8g6cn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.162377 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4fc11b-9bbd-42a2-9472-0c486ca426b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aafb08e3cb67cf9cb0a7953545d0b9074c3b31aa332539112d58de193317b431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe08b83417279721ed704768d7f1af53f841598b66eef7484b5587533ef4b3b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfc069f0393240b501509945f994efc31dfcb0a6ff717da8eb3f5952c5627228\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78e1bdf7e41b1881cc739dabd7444484cd44296d2a7b3f164539b93a3c5f7d99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf426c48a4f97fa2e629e5edd4210ebd03301799e710e46b7e6bf026720308c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a83eb000364c0d1d13f1ec30ca8252a037fc5ba8941a26a672effa681722494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91c0473dec5421877d26a28b6defaa9994922cca4b71bc47255eca09c3ee8498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hp9c6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wtx9m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.172680 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c28d90e3-ab19-480f-989e-3e49d1289b7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghwsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wkxgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.184099 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"594fc8e5-4dbb-42d3-b16e-7fa474b333ce\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bdfa6651ea4f9e48e6c2dbaca364a6d3561795efe0a5d26eec91acac709610d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093f820d967e9b2fee6c8e626a1187ef2f36d066f4065f80d3d9741b2bd1f2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb9cbeb0fa667f9373aee5b903e9af36e963881af47ca8a6afb5097623401bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8f756e68159ed9a6e6a3e90798f9f99b8af23db4fee4ce9640644118b9c93f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.194202 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8ba4510-20bc-4c65-a72d-6cdf0fd554aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6529a81232f3f57051a0982e935a142238a75bc8f2e555d58c7f1422e48c8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2784b7eaf2a7bae0a43a7a4fdddccc60b54388e79d3e6fb941c398552a8d13a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:48:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.204913 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6325df57a818ace22001d37f3dcd005905b9ce7c9547e78be0cb27fa8e1afbe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83563417f9cf3740680d11610aaf6a4f251eeab498788292bcc43eaf61cb0b0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T
17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.217112 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.228038 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xprzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57582220-95af-4697-9fa9-76e9cf03c15a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26d7ee3c52bbe49fe8b225f9e87f8265730fb8d43833bcc69969b0abe63a3fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kxgp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xprzv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.238920 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.238957 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.238965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.238979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.238990 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.239773 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88140af5-30b3-4fa7-9ed9-88315ec8a707\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15c46ee3640210a791b5e5915466aef234637910bd1f02caa7a20da43cf62296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://321908d172b84b0e4c9da80ec32ed8f8e1019f8fe360392f6dc7e7004f3ee569\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18737ef3c4f5a409a00ff0251376bc133cb4218e265d90bf50cf3f396f67371\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e2a21107cf13ed2192c42e615f4e30322bccdf2bbc40673013ebd9409fade8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:31Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.250988 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ef1ed9e033f026b07421f87dbf9603683936145c62d1060c1bd575f32488ffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.259993 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-swrxh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72362b16-8d6f-45db-abfa-a416b2d8d60c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2445105136759cc675e8ccf2eaa99ca3b19efa0c7aaf8a1dfd163c0c15bb76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhkh8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-swrxh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.271661 4702 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a77fa32-4f49-4b02-ac4a-fbad4d33e499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:48:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://edab62385de09d94e18755acda562bfafa9616f04431d3dc623b158e8ead34fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nsh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:48:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wmjst\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.281921 4702 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3bd7cdb-7d41-4a5a-8e56-3bb3e48af13d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d47fb9638f43d16500e94d930fee48e3c74cb289d5392279d0d5fe81fa4a64df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5135fe5572cea76acff952914f89f3341dabe5b1af851eef41fcdbc4ded911e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-528ng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:49:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6vrt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.341639 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.341693 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.341703 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.341720 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.341732 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.444155 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.444188 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.444200 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.444216 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.444225 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.547858 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.547897 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.547906 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.547927 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.547937 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.650440 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.650480 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.650488 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.650504 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.650513 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.753212 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.753253 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.753265 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.753282 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.753293 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.855615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.855663 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.855674 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.855692 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.855705 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.957536 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.957581 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.957590 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.957606 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:54 crc kubenswrapper[4702]: I1124 17:49:54.957615 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:54Z","lastTransitionTime":"2025-11-24T17:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.060286 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.060342 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.060351 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.060366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.060375 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.162430 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.162466 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.162477 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.162491 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.162500 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.265310 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.265347 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.265355 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.265372 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.265381 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.368676 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.368719 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.368733 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.368750 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.368761 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.409606 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.409782 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.40975295 +0000 UTC m=+148.650494114 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.471511 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.471544 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.471553 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.471568 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.471576 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.510434 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.510482 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.510503 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.510530 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510652 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510651 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510676 4702 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510776 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.510756262 +0000 UTC m=+148.751497426 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510703 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510838 4702 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510884 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.510871426 +0000 UTC m=+148.751612590 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510668 4702 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510902 4702 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510922 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.510916937 +0000 UTC m=+148.751658101 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510650 4702 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.510951 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.510945268 +0000 UTC m=+148.751686432 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.574349 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.574392 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.574402 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.574417 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.574430 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.647849 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.647896 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.647980 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.647876 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.648050 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.648136 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.648208 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:55 crc kubenswrapper[4702]: E1124 17:49:55.648323 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.677062 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.677112 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.677122 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.677166 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.677176 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.781125 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.781229 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.781256 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.781297 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.781323 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.883492 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.883534 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.883543 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.883557 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.883570 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.986243 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.986302 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.986313 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.986330 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:55 crc kubenswrapper[4702]: I1124 17:49:55.986341 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:55Z","lastTransitionTime":"2025-11-24T17:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.089011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.089051 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.089060 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.089075 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.089089 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.191969 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.192006 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.192017 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.192033 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.192046 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.294068 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.294123 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.294134 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.294150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.294163 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.396257 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.396303 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.396313 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.396331 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.396343 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.499084 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.499525 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.499534 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.499549 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.499559 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.602343 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.602390 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.602399 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.602416 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.602425 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.704347 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.704411 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.704423 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.704439 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.704453 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.806950 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.807032 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.807053 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.807086 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.807108 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.909712 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.909822 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.909837 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.909852 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:56 crc kubenswrapper[4702]: I1124 17:49:56.909863 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:56Z","lastTransitionTime":"2025-11-24T17:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.012101 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.012151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.012162 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.012181 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.012198 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.114676 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.114717 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.114727 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.114746 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.114758 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.217221 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.217263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.217275 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.217291 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.217301 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.320318 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.320355 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.320366 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.320380 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.320389 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.423388 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.423438 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.423449 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.423469 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.423483 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.526972 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.527009 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.527021 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.527039 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.527051 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.628968 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.629000 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.629010 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.629023 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.629032 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.647922 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.647920 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.648190 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.648222 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:57 crc kubenswrapper[4702]: E1124 17:49:57.648366 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:57 crc kubenswrapper[4702]: E1124 17:49:57.648700 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:57 crc kubenswrapper[4702]: E1124 17:49:57.648774 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:57 crc kubenswrapper[4702]: E1124 17:49:57.648892 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.731122 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.731167 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.731210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.731226 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.731238 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.833887 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.833927 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.833936 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.833950 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.833960 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.936922 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.936974 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.936986 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.937005 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:57 crc kubenswrapper[4702]: I1124 17:49:57.937017 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:57Z","lastTransitionTime":"2025-11-24T17:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.039442 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.039489 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.039500 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.039515 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.039525 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.142549 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.142593 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.142606 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.142624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.142636 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.246037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.246085 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.246102 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.246121 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.246136 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.349953 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.349993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.350003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.350017 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.350027 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.451911 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.451951 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.451963 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.451979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.451991 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.554965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.555047 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.555065 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.555096 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.555117 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.658457 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.658503 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.658515 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.658538 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.658550 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.762161 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.762224 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.762244 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.762278 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.762305 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.866014 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.866089 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.866108 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.866138 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.866158 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.969894 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.969946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.969958 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.969980 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:58 crc kubenswrapper[4702]: I1124 17:49:58.969997 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:58Z","lastTransitionTime":"2025-11-24T17:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.073381 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.073435 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.073451 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.073474 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.073491 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.177246 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.177313 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.177324 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.177339 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.177350 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.281171 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.281234 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.281245 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.281265 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.281277 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.383207 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.383263 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.383272 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.383286 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.383295 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.485158 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.485196 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.485204 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.485218 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.485227 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.486065 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.486085 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.486093 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.486102 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.486125 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.497714 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.501698 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.501762 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.501779 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.501833 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.501855 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.517348 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.521168 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.521231 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.521245 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.521267 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.521281 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.534350 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.538461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.538493 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.538507 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.538526 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.538542 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.550583 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.553844 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.553881 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.553891 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.553908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.553921 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.566082 4702 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d33a4b2-a511-45b9-8f09-968167e4e730\\\",\\\"systemUUID\\\":\\\"cad111b8-871f-4060-8514-4607c81be6e2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:49:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.566192 4702 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.587323 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.587388 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.587403 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.587427 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.587443 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.647088 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.647225 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.647252 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.647096 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.647276 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.647489 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.647554 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:49:59 crc kubenswrapper[4702]: E1124 17:49:59.647634 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.690564 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.690624 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.690637 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.690657 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.690669 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.793333 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.793396 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.793409 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.793427 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.793440 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.895766 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.895827 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.895842 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.895859 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.895870 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.997923 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.997957 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.997965 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.997980 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:49:59 crc kubenswrapper[4702]: I1124 17:49:59.997992 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:49:59Z","lastTransitionTime":"2025-11-24T17:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.099954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.099984 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.099994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.100009 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.100020 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.202924 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.202984 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.203000 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.203018 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.203031 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.305614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.305650 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.305660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.305676 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.305690 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.408665 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.408745 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.408754 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.408773 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.408783 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.511703 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.511742 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.511750 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.511763 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.511774 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.614557 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.614639 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.614652 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.614671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.614682 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.717216 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.717266 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.717281 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.717301 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.717314 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.819530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.819772 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.819871 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.819966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.820080 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.923064 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.923112 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.923124 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.923139 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:00 crc kubenswrapper[4702]: I1124 17:50:00.923151 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:00Z","lastTransitionTime":"2025-11-24T17:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.025566 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.025595 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.025603 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.025615 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.025623 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.127953 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.127985 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.127994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.128009 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.128020 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.230288 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.230346 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.230365 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.230381 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.230392 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.333187 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.333232 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.333244 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.333260 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.333272 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.435287 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.435333 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.435342 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.435357 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.435367 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.538330 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.538372 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.538390 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.538411 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.538423 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.641583 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.641610 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.641617 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.641630 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.641639 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.648112 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:01 crc kubenswrapper[4702]: E1124 17:50:01.648208 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.648265 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.648387 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.648576 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:01 crc kubenswrapper[4702]: E1124 17:50:01.649051 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:01 crc kubenswrapper[4702]: E1124 17:50:01.649343 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:01 crc kubenswrapper[4702]: E1124 17:50:01.649420 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.698324 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=70.6982813 podStartE2EDuration="1m10.6982813s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.674845825 +0000 UTC m=+90.915586999" watchObservedRunningTime="2025-11-24 17:50:01.6982813 +0000 UTC m=+90.939022474" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.743854 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.743898 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.743909 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.743925 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.743939 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.817921 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-8g6cn" podStartSLOduration=70.817900127 podStartE2EDuration="1m10.817900127s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.784536979 +0000 UTC m=+91.025278153" watchObservedRunningTime="2025-11-24 17:50:01.817900127 +0000 UTC m=+91.058641291" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.844931 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-wtx9m" podStartSLOduration=70.844908765 podStartE2EDuration="1m10.844908765s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.821743867 +0000 UTC m=+91.062485031" watchObservedRunningTime="2025-11-24 17:50:01.844908765 +0000 UTC m=+91.085649939" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.845911 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.845947 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.845958 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.845972 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.845984 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.876887 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=40.876865038 podStartE2EDuration="40.876865038s" podCreationTimestamp="2025-11-24 17:49:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.858082398 +0000 UTC m=+91.098823562" watchObservedRunningTime="2025-11-24 17:50:01.876865038 +0000 UTC m=+91.117606212" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.877026 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=17.877019303 podStartE2EDuration="17.877019303s" podCreationTimestamp="2025-11-24 17:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.876545518 +0000 UTC m=+91.117286692" watchObservedRunningTime="2025-11-24 17:50:01.877019303 +0000 UTC m=+91.117760457" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.946547 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=67.946518135 podStartE2EDuration="1m7.946518135s" podCreationTimestamp="2025-11-24 17:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.946110023 +0000 UTC m=+91.186851187" watchObservedRunningTime="2025-11-24 17:50:01.946518135 +0000 UTC m=+91.187259309" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.947568 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-xprzv" podStartSLOduration=70.947540327 podStartE2EDuration="1m10.947540327s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.924820384 +0000 UTC m=+91.165561548" watchObservedRunningTime="2025-11-24 17:50:01.947540327 +0000 UTC m=+91.188281491" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.947959 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.948023 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.948037 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.948059 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.948078 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:01Z","lastTransitionTime":"2025-11-24T17:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.969201 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-swrxh" podStartSLOduration=70.969180417 podStartE2EDuration="1m10.969180417s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.968612069 +0000 UTC m=+91.209353233" watchObservedRunningTime="2025-11-24 17:50:01.969180417 +0000 UTC m=+91.209921581" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.982509 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podStartSLOduration=70.982485085 podStartE2EDuration="1m10.982485085s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.981638188 +0000 UTC m=+91.222379362" watchObservedRunningTime="2025-11-24 17:50:01.982485085 +0000 UTC m=+91.223226269" Nov 24 17:50:01 crc kubenswrapper[4702]: I1124 17:50:01.993462 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6vrt9" podStartSLOduration=70.993441149 podStartE2EDuration="1m10.993441149s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:01.992745206 +0000 UTC m=+91.233486360" watchObservedRunningTime="2025-11-24 17:50:01.993441149 +0000 UTC m=+91.234182313" Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.049912 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.049968 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.049982 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.050000 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.050012 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.151723 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.151753 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.151762 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.151776 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.151785 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.254128 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.254162 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.254171 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.254183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.254192 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.356395 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.356431 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.356439 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.356454 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.356464 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.458838 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.458874 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.458885 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.458901 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.458911 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.561461 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.561514 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.561531 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.561551 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.561562 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.664079 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.664134 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.664150 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.664163 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.664172 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.766945 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.766994 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.767003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.767017 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.767026 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.869489 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.869530 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.869542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.869557 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.869567 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.971639 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.971688 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.971697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.971720 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:02 crc kubenswrapper[4702]: I1124 17:50:02.971734 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:02Z","lastTransitionTime":"2025-11-24T17:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.074503 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.074542 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.074554 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.074570 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.074581 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.176968 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.177001 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.177011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.177025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.177034 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.279550 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.279601 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.279609 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.279625 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.279635 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.382765 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.382858 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.382873 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.382897 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.382910 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.486714 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.486783 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.486814 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.486839 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.486854 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.590210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.590278 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.590287 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.590304 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.590316 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.648239 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.648296 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.648318 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.648336 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:03 crc kubenswrapper[4702]: E1124 17:50:03.648477 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a"
Nov 24 17:50:03 crc kubenswrapper[4702]: E1124 17:50:03.648592 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:50:03 crc kubenswrapper[4702]: E1124 17:50:03.648681 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:50:03 crc kubenswrapper[4702]: E1124 17:50:03.648743 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.692651 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.692707 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.692718 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.692736 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.692747 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.795358 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.795395 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.795403 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.795417 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.795427 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.898434 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.898485 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.898500 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.898523 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:03 crc kubenswrapper[4702]: I1124 17:50:03.898539 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:03Z","lastTransitionTime":"2025-11-24T17:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.000640 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.000736 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.000749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.000769 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.000781 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.102671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.102757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.102771 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.102790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.102825 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.205497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.205559 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.205571 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.205594 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.205608 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.309547 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.309632 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.309654 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.309684 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.309704 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.412670 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.412737 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.412752 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.412772 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.412784 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.516161 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.516241 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.516265 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.516297 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.516316 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.620304 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.620368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.620387 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.620424 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.620460 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.723765 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.723833 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.723847 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.723867 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.723879 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.826926 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.826979 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.826990 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.827010 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.827022 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.929947 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.930003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.930015 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.930034 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:04 crc kubenswrapper[4702]: I1124 17:50:04.930048 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:04Z","lastTransitionTime":"2025-11-24T17:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.033006 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.033042 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.033052 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.033070 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.033082 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.135666 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.135711 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.135720 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.135738 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.135749 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.238329 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.238368 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.238411 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.238432 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.238467 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.341320 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.341371 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.341382 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.341398 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.341411 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.443832 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.443866 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.443879 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.443894 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.443908 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.546891 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.546938 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.546946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.546962 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.546974 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.647671 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.647738 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.647682 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.647673 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:50:05 crc kubenswrapper[4702]: E1124 17:50:05.647835 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:50:05 crc kubenswrapper[4702]: E1124 17:50:05.647923 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a"
Nov 24 17:50:05 crc kubenswrapper[4702]: E1124 17:50:05.648128 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:50:05 crc kubenswrapper[4702]: E1124 17:50:05.648215 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.649080 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.649118 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.649130 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.649145 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.649158 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.752309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.752336 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.752344 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.752356 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.752365 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.854609 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.854653 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.854661 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.854680 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.854698 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.957614 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.957655 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.957667 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.957684 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:05 crc kubenswrapper[4702]: I1124 17:50:05.957696 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:05Z","lastTransitionTime":"2025-11-24T17:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.060732 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.060790 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.060830 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.060853 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.060871 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.164099 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.164194 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.164210 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.164239 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.164260 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.266444 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.266526 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.266544 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.266576 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.266594 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.369782 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.369872 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.369885 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.369908 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.369922 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.473063 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.473108 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.473147 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.473183 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.473219 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.576622 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.576686 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.576700 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.576724 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.576739 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.680619 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.680677 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.680697 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.680733 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.680753 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.784793 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.784854 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.784867 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.784881 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.784892 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.887913 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.887946 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.887954 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.887966 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.887975 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.991420 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.991470 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.991480 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.991497 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:06 crc kubenswrapper[4702]: I1124 17:50:06.991507 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:06Z","lastTransitionTime":"2025-11-24T17:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.094349 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.094859 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.095099 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.095220 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.095326 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.197719 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.197755 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.197768 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.197784 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.197814 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.301629 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.301683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.301695 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.301713 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.301725 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.404948 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.404993 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.405003 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.405017 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.405026 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.508597 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.508683 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.508706 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.508729 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.508744 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.611141 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.611184 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.611221 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.611238 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.611249 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.647326 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.647398 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.647367 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.647569 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:07 crc kubenswrapper[4702]: E1124 17:50:07.647554 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:07 crc kubenswrapper[4702]: E1124 17:50:07.647701 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:07 crc kubenswrapper[4702]: E1124 17:50:07.647879 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:07 crc kubenswrapper[4702]: E1124 17:50:07.647947 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.714660 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.714732 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.714749 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.714769 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.714786 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.817476 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.817529 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.817541 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.817558 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.817570 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.920494 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.920532 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.920541 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.920556 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:07 crc kubenswrapper[4702]: I1124 17:50:07.920569 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:07Z","lastTransitionTime":"2025-11-24T17:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.023085 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.023122 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.023134 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.023151 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.023163 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.125459 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.125498 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.125509 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.125525 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.125536 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.227696 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.227735 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.227744 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.227757 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.227775 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.329949 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.329989 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.330004 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.330025 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.330039 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.432591 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.432640 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.432653 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.432671 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.432682 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.535162 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.535228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.535250 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.535280 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.535299 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.637309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.637342 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.637349 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.637363 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.637371 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.648921 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:50:08 crc kubenswrapper[4702]: E1124 17:50:08.649105 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.740185 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.740257 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.740266 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.740279 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.740289 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.842134 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.842176 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.842186 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.842200 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.842209 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.944881 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.944913 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.944921 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.944935 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:08 crc kubenswrapper[4702]: I1124 17:50:08.944944 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:08Z","lastTransitionTime":"2025-11-24T17:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.046739 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.046775 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.046786 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.046831 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.046843 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.149520 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.149839 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.149948 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.150034 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.150120 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.252482 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.252536 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.252548 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.252564 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.252579 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.355634 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.355667 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.355675 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.355687 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.355695 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.458932 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.459011 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.459043 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.459076 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.459101 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.459266 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.459385 4702 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.459433 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs podName:c28d90e3-ab19-480f-989e-3e49d1289b7a nodeName:}" failed. No retries permitted until 2025-11-24 17:51:13.459416142 +0000 UTC m=+162.700157326 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs") pod "network-metrics-daemon-wkxgm" (UID: "c28d90e3-ab19-480f-989e-3e49d1289b7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.562228 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.562287 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.562296 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.562326 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.562342 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.647502 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.647626 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.647643 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.647748 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.647723 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.647885 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.648703 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:09 crc kubenswrapper[4702]: E1124 17:50:09.649877 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.664309 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.664372 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.664385 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.664403 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.664415 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.767505 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.767563 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.767578 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.767602 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.767615 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.807943 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.808006 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.808018 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.808038 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.808051 4702 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:50:09Z","lastTransitionTime":"2025-11-24T17:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.846912 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b"] Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.847319 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.849400 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.849492 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.849779 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.850865 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.965772 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4ad67280-d310-43f4-9d06-f948e7b6b5e7-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.965832 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ad67280-d310-43f4-9d06-f948e7b6b5e7-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.965867 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.965892 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:09 crc kubenswrapper[4702]: I1124 17:50:09.965915 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4ad67280-d310-43f4-9d06-f948e7b6b5e7-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067453 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4ad67280-d310-43f4-9d06-f948e7b6b5e7-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc 
kubenswrapper[4702]: I1124 17:50:10.067526 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ad67280-d310-43f4-9d06-f948e7b6b5e7-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067582 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067648 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067697 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4ad67280-d310-43f4-9d06-f948e7b6b5e7-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067832 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.067887 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/4ad67280-d310-43f4-9d06-f948e7b6b5e7-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.069101 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4ad67280-d310-43f4-9d06-f948e7b6b5e7-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.074290 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ad67280-d310-43f4-9d06-f948e7b6b5e7-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.088513 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/4ad67280-d310-43f4-9d06-f948e7b6b5e7-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mzd2b\" (UID: \"4ad67280-d310-43f4-9d06-f948e7b6b5e7\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: I1124 17:50:10.163404 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" Nov 24 17:50:10 crc kubenswrapper[4702]: W1124 17:50:10.177317 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ad67280_d310_43f4_9d06_f948e7b6b5e7.slice/crio-a265c98d2a56811be1790ba3ed0b2fed4d77800442849a9c940b297238efd812 WatchSource:0}: Error finding container a265c98d2a56811be1790ba3ed0b2fed4d77800442849a9c940b297238efd812: Status 404 returned error can't find the container with id a265c98d2a56811be1790ba3ed0b2fed4d77800442849a9c940b297238efd812 Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.121097 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" event={"ID":"4ad67280-d310-43f4-9d06-f948e7b6b5e7","Type":"ContainerStarted","Data":"4770700e0242e5d33fb885a53a344edf32f30d01a764e736630a07ddbdc0092a"} Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.121153 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" event={"ID":"4ad67280-d310-43f4-9d06-f948e7b6b5e7","Type":"ContainerStarted","Data":"a265c98d2a56811be1790ba3ed0b2fed4d77800442849a9c940b297238efd812"} Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.134652 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mzd2b" podStartSLOduration=80.134634855 podStartE2EDuration="1m20.134634855s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:11.133998736 +0000 UTC m=+100.374739920" watchObservedRunningTime="2025-11-24 17:50:11.134634855 +0000 UTC m=+100.375376029" Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.647961 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.649011 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.649057 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:11 crc kubenswrapper[4702]: I1124 17:50:11.649088 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:11 crc kubenswrapper[4702]: E1124 17:50:11.649168 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:11 crc kubenswrapper[4702]: E1124 17:50:11.649228 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:11 crc kubenswrapper[4702]: E1124 17:50:11.649320 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:11 crc kubenswrapper[4702]: E1124 17:50:11.649396 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:13 crc kubenswrapper[4702]: I1124 17:50:13.647233 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:13 crc kubenswrapper[4702]: E1124 17:50:13.647431 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:13 crc kubenswrapper[4702]: I1124 17:50:13.647474 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:13 crc kubenswrapper[4702]: I1124 17:50:13.647490 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:13 crc kubenswrapper[4702]: I1124 17:50:13.647661 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:13 crc kubenswrapper[4702]: E1124 17:50:13.647734 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:13 crc kubenswrapper[4702]: E1124 17:50:13.647867 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:13 crc kubenswrapper[4702]: E1124 17:50:13.647910 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:13 crc kubenswrapper[4702]: I1124 17:50:13.664200 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 24 17:50:15 crc kubenswrapper[4702]: I1124 17:50:15.647656 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:15 crc kubenswrapper[4702]: I1124 17:50:15.647703 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:15 crc kubenswrapper[4702]: I1124 17:50:15.647846 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:15 crc kubenswrapper[4702]: E1124 17:50:15.647886 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:15 crc kubenswrapper[4702]: I1124 17:50:15.647888 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:15 crc kubenswrapper[4702]: E1124 17:50:15.648036 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:15 crc kubenswrapper[4702]: E1124 17:50:15.648104 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:15 crc kubenswrapper[4702]: E1124 17:50:15.648147 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:17 crc kubenswrapper[4702]: I1124 17:50:17.647347 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:17 crc kubenswrapper[4702]: I1124 17:50:17.647400 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:17 crc kubenswrapper[4702]: I1124 17:50:17.647470 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:17 crc kubenswrapper[4702]: I1124 17:50:17.647355 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:17 crc kubenswrapper[4702]: E1124 17:50:17.647541 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:17 crc kubenswrapper[4702]: E1124 17:50:17.647632 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:17 crc kubenswrapper[4702]: E1124 17:50:17.647716 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:17 crc kubenswrapper[4702]: E1124 17:50:17.647849 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:19 crc kubenswrapper[4702]: I1124 17:50:19.648126 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:19 crc kubenswrapper[4702]: I1124 17:50:19.648157 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:19 crc kubenswrapper[4702]: I1124 17:50:19.648193 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:19 crc kubenswrapper[4702]: E1124 17:50:19.648256 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:19 crc kubenswrapper[4702]: I1124 17:50:19.648290 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:19 crc kubenswrapper[4702]: E1124 17:50:19.648341 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:19 crc kubenswrapper[4702]: E1124 17:50:19.648407 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:19 crc kubenswrapper[4702]: E1124 17:50:19.648694 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:19 crc kubenswrapper[4702]: I1124 17:50:19.649008 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:50:19 crc kubenswrapper[4702]: E1124 17:50:19.649133 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-f5g6n_openshift-ovn-kubernetes(0d4b86a8-9180-41ee-b240-0071bdc994da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" Nov 24 17:50:21 crc kubenswrapper[4702]: I1124 17:50:21.647763 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:21 crc kubenswrapper[4702]: I1124 17:50:21.647855 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:21 crc kubenswrapper[4702]: I1124 17:50:21.647787 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:21 crc kubenswrapper[4702]: I1124 17:50:21.656067 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:21 crc kubenswrapper[4702]: E1124 17:50:21.656059 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:21 crc kubenswrapper[4702]: E1124 17:50:21.656247 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:21 crc kubenswrapper[4702]: E1124 17:50:21.656313 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:21 crc kubenswrapper[4702]: E1124 17:50:21.656459 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:21 crc kubenswrapper[4702]: I1124 17:50:21.693174 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=8.693123005 podStartE2EDuration="8.693123005s" podCreationTimestamp="2025-11-24 17:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:21.6926335 +0000 UTC m=+110.933374684" watchObservedRunningTime="2025-11-24 17:50:21.693123005 +0000 UTC m=+110.933864179" Nov 24 17:50:23 crc kubenswrapper[4702]: I1124 17:50:23.647106 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:23 crc kubenswrapper[4702]: I1124 17:50:23.647135 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:23 crc kubenswrapper[4702]: I1124 17:50:23.647127 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:23 crc kubenswrapper[4702]: I1124 17:50:23.647111 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:23 crc kubenswrapper[4702]: E1124 17:50:23.647261 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:23 crc kubenswrapper[4702]: E1124 17:50:23.647330 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:23 crc kubenswrapper[4702]: E1124 17:50:23.647412 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:23 crc kubenswrapper[4702]: E1124 17:50:23.647496 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:25 crc kubenswrapper[4702]: I1124 17:50:25.647847 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:25 crc kubenswrapper[4702]: I1124 17:50:25.647875 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:25 crc kubenswrapper[4702]: E1124 17:50:25.648014 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:25 crc kubenswrapper[4702]: I1124 17:50:25.648028 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:25 crc kubenswrapper[4702]: I1124 17:50:25.648078 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:25 crc kubenswrapper[4702]: E1124 17:50:25.648090 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:25 crc kubenswrapper[4702]: E1124 17:50:25.648160 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:25 crc kubenswrapper[4702]: E1124 17:50:25.648253 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.167889 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/1.log" Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.168634 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/0.log" Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.168683 4702 generic.go:334] "Generic (PLEG): container finished" podID="f4859751-212a-4d94-b0c7-875b1da99cd8" containerID="d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e" exitCode=1 Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.168714 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerDied","Data":"d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e"} Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.168747 4702 scope.go:117] "RemoveContainer" containerID="87ac4e8c2a96cdd074c5573e68425253ee10fb71e7c6c868059427b99f5d78a1" Nov 24 17:50:26 crc kubenswrapper[4702]: I1124 17:50:26.169179 4702 scope.go:117] "RemoveContainer" containerID="d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e" Nov 24 17:50:26 crc kubenswrapper[4702]: E1124 17:50:26.169346 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-8g6cn_openshift-multus(f4859751-212a-4d94-b0c7-875b1da99cd8)\"" pod="openshift-multus/multus-8g6cn" podUID="f4859751-212a-4d94-b0c7-875b1da99cd8" Nov 24 17:50:27 crc kubenswrapper[4702]: I1124 17:50:27.172416 4702 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/1.log" Nov 24 17:50:27 crc kubenswrapper[4702]: I1124 17:50:27.647123 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:27 crc kubenswrapper[4702]: I1124 17:50:27.647173 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:27 crc kubenswrapper[4702]: I1124 17:50:27.647174 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:27 crc kubenswrapper[4702]: I1124 17:50:27.647134 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:27 crc kubenswrapper[4702]: E1124 17:50:27.647245 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:27 crc kubenswrapper[4702]: E1124 17:50:27.647340 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:27 crc kubenswrapper[4702]: E1124 17:50:27.647395 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:27 crc kubenswrapper[4702]: E1124 17:50:27.647435 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:29 crc kubenswrapper[4702]: I1124 17:50:29.647919 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:29 crc kubenswrapper[4702]: I1124 17:50:29.648001 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:29 crc kubenswrapper[4702]: I1124 17:50:29.648061 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:29 crc kubenswrapper[4702]: I1124 17:50:29.648015 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:29 crc kubenswrapper[4702]: E1124 17:50:29.648274 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:29 crc kubenswrapper[4702]: E1124 17:50:29.648400 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:29 crc kubenswrapper[4702]: E1124 17:50:29.648446 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:29 crc kubenswrapper[4702]: E1124 17:50:29.648522 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:31 crc kubenswrapper[4702]: I1124 17:50:31.647816 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:31 crc kubenswrapper[4702]: I1124 17:50:31.647844 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:31 crc kubenswrapper[4702]: I1124 17:50:31.647849 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:31 crc kubenswrapper[4702]: I1124 17:50:31.647951 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.648863 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.649047 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.649153 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.649107 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.666375 4702 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 24 17:50:31 crc kubenswrapper[4702]: E1124 17:50:31.725943 4702 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:50:33 crc kubenswrapper[4702]: I1124 17:50:33.648125 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:33 crc kubenswrapper[4702]: I1124 17:50:33.648125 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:33 crc kubenswrapper[4702]: E1124 17:50:33.649192 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:33 crc kubenswrapper[4702]: I1124 17:50:33.648234 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:33 crc kubenswrapper[4702]: I1124 17:50:33.648164 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:33 crc kubenswrapper[4702]: E1124 17:50:33.649379 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:33 crc kubenswrapper[4702]: E1124 17:50:33.649296 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:33 crc kubenswrapper[4702]: E1124 17:50:33.649894 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:34 crc kubenswrapper[4702]: I1124 17:50:34.649218 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.201490 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/3.log" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.205171 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerStarted","Data":"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6"} Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.205591 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.576514 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podStartSLOduration=104.576469744 podStartE2EDuration="1m44.576469744s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:35.245646976 +0000 UTC m=+124.486388150" watchObservedRunningTime="2025-11-24 17:50:35.576469744 +0000 UTC m=+124.817210958" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.577846 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wkxgm"] Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.578063 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:35 crc kubenswrapper[4702]: E1124 17:50:35.578252 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.647388 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.647410 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:35 crc kubenswrapper[4702]: I1124 17:50:35.647451 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:35 crc kubenswrapper[4702]: E1124 17:50:35.647661 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:35 crc kubenswrapper[4702]: E1124 17:50:35.647828 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:35 crc kubenswrapper[4702]: E1124 17:50:35.647885 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:36 crc kubenswrapper[4702]: E1124 17:50:36.727062 4702 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:50:37 crc kubenswrapper[4702]: I1124 17:50:37.647783 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:37 crc kubenswrapper[4702]: I1124 17:50:37.647888 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:37 crc kubenswrapper[4702]: E1124 17:50:37.647931 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:37 crc kubenswrapper[4702]: I1124 17:50:37.647887 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:37 crc kubenswrapper[4702]: I1124 17:50:37.647890 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:37 crc kubenswrapper[4702]: E1124 17:50:37.648109 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:37 crc kubenswrapper[4702]: E1124 17:50:37.648233 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:37 crc kubenswrapper[4702]: E1124 17:50:37.648328 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:37 crc kubenswrapper[4702]: I1124 17:50:37.648659 4702 scope.go:117] "RemoveContainer" containerID="d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e" Nov 24 17:50:38 crc kubenswrapper[4702]: I1124 17:50:38.218376 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/1.log" Nov 24 17:50:38 crc kubenswrapper[4702]: I1124 17:50:38.219000 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerStarted","Data":"fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf"} Nov 24 17:50:39 crc kubenswrapper[4702]: I1124 17:50:39.647139 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:39 crc kubenswrapper[4702]: I1124 17:50:39.647215 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:39 crc kubenswrapper[4702]: E1124 17:50:39.647302 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:39 crc kubenswrapper[4702]: I1124 17:50:39.647329 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:39 crc kubenswrapper[4702]: I1124 17:50:39.647139 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:39 crc kubenswrapper[4702]: E1124 17:50:39.647428 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:39 crc kubenswrapper[4702]: E1124 17:50:39.647584 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:39 crc kubenswrapper[4702]: E1124 17:50:39.647694 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:41 crc kubenswrapper[4702]: I1124 17:50:41.647884 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:41 crc kubenswrapper[4702]: I1124 17:50:41.647992 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:41 crc kubenswrapper[4702]: I1124 17:50:41.648028 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:41 crc kubenswrapper[4702]: E1124 17:50:41.649004 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wkxgm" podUID="c28d90e3-ab19-480f-989e-3e49d1289b7a" Nov 24 17:50:41 crc kubenswrapper[4702]: I1124 17:50:41.649021 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:41 crc kubenswrapper[4702]: E1124 17:50:41.649226 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:50:41 crc kubenswrapper[4702]: E1124 17:50:41.649150 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:50:41 crc kubenswrapper[4702]: E1124 17:50:41.649349 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.647787 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.647787 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.647822 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.648641 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.651924 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.652546 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.652611 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.652719 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.652942 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 24 17:50:43 crc kubenswrapper[4702]: I1124 17:50:43.653130 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.704876 4702 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.740226 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k4zhk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.740905 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.741210 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wq4v5"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.742067 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.743186 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.744605 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.745392 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.745442 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.745497 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.745583 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.746780 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.746898 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.747437 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748290 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748385 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748420 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748499 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748546 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748596 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748709 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748712 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748863 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.748902 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.749633 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.750254 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.751070 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.751140 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.752578 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.753074 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.755298 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.755508 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.755744 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.755892 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.756011 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.756868 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.757166 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.758167 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.758436 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.758619 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.758611 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.765159 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.767621 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.767775 4702 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.768700 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.768787 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.769328 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.769357 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.769636 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.769736 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jzpbc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.778027 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.778933 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.786783 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.787384 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.787693 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-krcnx"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.788048 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.788392 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-fsc9b"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.789567 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.790016 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.790303 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.790547 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.790833 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.791094 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.793583 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.793998 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.794096 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.794650 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-67bvv"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.795198 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.795666 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.794654 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.795956 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.794725 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.796145 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.796234 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.794815 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.795000 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.797074 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.795377 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.799233 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.799949 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.800726 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hw6dk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801333 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801473 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801662 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801745 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801760 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801899 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.802078 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.801956 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.802690 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.802824 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820058 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820105 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820172 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820251 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820309 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820345 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820363 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820423 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820470 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820529 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820105 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820825 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820925 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820991 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821108 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: 
I1124 17:50:50.821205 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821283 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821368 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821465 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821534 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821613 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821704 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.820063 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.821906 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822003 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822145 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822178 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822248 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822406 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822616 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822791 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822860 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.822820 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.823703 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 24 17:50:50 crc 
kubenswrapper[4702]: I1124 17:50:50.833465 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.860493 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.860746 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.861369 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.861451 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.862104 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.862671 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-78ghc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.862835 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.863136 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.863410 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.863540 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.864308 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.865380 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.865454 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.865527 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-r5tsm"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.865593 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.865815 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.866364 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.873592 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.874278 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.874346 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.874508 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.874596 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.874691 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.875274 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.875516 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.875864 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.876767 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.879896 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.880237 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.880319 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.881710 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.882279 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.882622 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.883244 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s65mg"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.883632 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.884557 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.885204 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.885495 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.885602 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.885911 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.886783 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.887644 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.887647 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.888194 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.893397 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.894118 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.895064 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.895737 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.898545 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.906820 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.907382 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.908672 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.912254 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.912926 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.912933 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gb2f9"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.913033 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.915123 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k4zhk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.915154 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.915172 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fsc9b"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.915191 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.915306 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.917239 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-krcnx"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.918562 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-vwg94"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.919613 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.919852 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.921153 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.922393 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.922421 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.923592 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924080 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdmvs\" (UniqueName: \"kubernetes.io/projected/35419044-f572-4b95-9811-46e6ab51c87a-kube-api-access-fdmvs\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924129 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-service-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924169 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924198 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e74aff5-38e7-42fb-b2dc-172662221443-serving-cert\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924223 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/35419044-f572-4b95-9811-46e6ab51c87a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924284 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924310 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924337 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924363 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924390 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3ed1886-2cab-466f-a497-c6d8faa7955f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924432 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924482 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924507 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7ghx\" (UniqueName: \"kubernetes.io/projected/2e74aff5-38e7-42fb-b2dc-172662221443-kube-api-access-q7ghx\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 
17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924618 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924648 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924688 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-config\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924697 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924717 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bcgg\" (UniqueName: \"kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924766 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924792 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3ed1886-2cab-466f-a497-c6d8faa7955f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924836 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.924997 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/f3ed1886-2cab-466f-a497-c6d8faa7955f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.925045 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.925080 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.925131 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.926512 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-67bvv"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.927577 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-78ghc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.928722 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.929747 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.930972 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s65mg"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.932071 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wq4v5"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.933346 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.936334 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.937005 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.938459 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.939725 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jzpbc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.940949 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.942433 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.942536 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.943724 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.945126 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-vwg94"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.946382 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.948143 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.949818 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.951593 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hw6dk"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.953275 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-5bsls"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.955338 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-9swhp"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.955540 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.955768 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.955878 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.956767 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.958015 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.959529 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.960591 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gb2f9"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.962542 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5bsls"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.962887 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.965062 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.966254 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.967447 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.969430 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-gpf8l"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.970210 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.973029 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-gpf8l"] Nov 24 17:50:50 crc kubenswrapper[4702]: I1124 17:50:50.982938 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.002872 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.022870 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026039 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdmvs\" (UniqueName: \"kubernetes.io/projected/35419044-f572-4b95-9811-46e6ab51c87a-kube-api-access-fdmvs\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026149 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-service-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026188 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026304 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e74aff5-38e7-42fb-b2dc-172662221443-serving-cert\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026336 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/35419044-f572-4b95-9811-46e6ab51c87a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026365 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026393 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026420 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026447 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026471 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3ed1886-2cab-466f-a497-c6d8faa7955f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026497 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026550 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026571 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7ghx\" (UniqueName: \"kubernetes.io/projected/2e74aff5-38e7-42fb-b2dc-172662221443-kube-api-access-q7ghx\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026595 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026632 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026678 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-config\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026698 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bcgg\" (UniqueName: \"kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026715 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026722 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026739 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3ed1886-2cab-466f-a497-c6d8faa7955f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026817 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-service-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026885 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.026968 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f3ed1886-2cab-466f-a497-c6d8faa7955f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.027006 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.027047 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.027104 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.027888 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.028134 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.029380 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-config\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.030320 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.030376 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f3ed1886-2cab-466f-a497-c6d8faa7955f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.030284 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.030715 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e74aff5-38e7-42fb-b2dc-172662221443-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032442 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032448 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e74aff5-38e7-42fb-b2dc-172662221443-serving-cert\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032486 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032507 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/35419044-f572-4b95-9811-46e6ab51c87a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032573 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.032888 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3ed1886-2cab-466f-a497-c6d8faa7955f-serving-cert\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.033534 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.033740 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.033851 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.034491 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.038535 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.046134 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.063550 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.083913 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.104368 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.125112 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.143020 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.163218 4702 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.204610 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.223496 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.243777 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.264938 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.284047 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.304290 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.324284 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.344966 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.363164 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.383531 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.404060 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.430335 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.443475 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.483837 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.503294 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.522869 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.542955 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.563111 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.583081 4702 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"service-ca-bundle" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.603295 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.624234 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.643881 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.663666 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.683771 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.703714 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.723270 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.743708 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.764587 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.783011 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.810175 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.823183 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.843471 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.863225 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.883548 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.902333 4702 request.go:700] Waited for 1.017421568s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-api/secrets?fieldSelector=metadata.name%3Dcontrol-plane-machine-set-operator-dockercfg-k9rxt&limit=500&resourceVersion=0 Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.904261 4702 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.922875 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.942998 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.963360 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 24 17:50:51 crc kubenswrapper[4702]: I1124 17:50:51.983627 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.003788 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.022733 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.044532 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.063486 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.082583 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.104006 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.123674 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.143755 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.164399 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.184457 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.203230 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.223002 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.242909 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.263263 4702 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.283973 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.304282 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.323169 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.343200 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.363639 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.383727 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.403169 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.422945 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.443773 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.463557 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.482562 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.482629 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.483339 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.502938 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.522616 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.543114 4702 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 24 17:50:52 crc kubenswrapper[4702]: 
I1124 17:50:52.563119 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.582835 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.603098 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.623787 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.643156 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.664287 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.683107 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.703007 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.724010 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.742699 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.763485 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.783775 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.817599 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdmvs\" (UniqueName: \"kubernetes.io/projected/35419044-f572-4b95-9811-46e6ab51c87a-kube-api-access-fdmvs\") pod \"cluster-samples-operator-665b6dd947-vpwsc\" (UID: \"35419044-f572-4b95-9811-46e6ab51c87a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.837075 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3ed1886-2cab-466f-a497-c6d8faa7955f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwq6\" (UID: \"f3ed1886-2cab-466f-a497-c6d8faa7955f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.856601 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7ghx\" (UniqueName: \"kubernetes.io/projected/2e74aff5-38e7-42fb-b2dc-172662221443-kube-api-access-q7ghx\") pod \"authentication-operator-69f744f599-jzpbc\" (UID: \"2e74aff5-38e7-42fb-b2dc-172662221443\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.878923 4702 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bcgg\" (UniqueName: \"kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg\") pod \"oauth-openshift-558db77b4-n2bxq\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.955673 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/837cbb40-8ba1-4602-8bab-3457eec318cb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956013 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956134 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e200aa7-865a-4eec-8a41-f2340b878ccc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956255 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-oauth-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956354 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k2mk\" (UniqueName: \"kubernetes.io/projected/ae2f2e1f-429f-4a8b-9556-26910294ab6e-kube-api-access-7k2mk\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956495 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-images\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956606 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-trusted-ca\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956714 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-serving-cert\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956828 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.956926 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-config\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957021 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-service-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957132 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kczwd\" (UniqueName: \"kubernetes.io/projected/e4929a7f-bea6-4417-94f8-3dfdb325719d-kube-api-access-kczwd\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957242 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957346 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q7bw\" (UniqueName: \"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-kube-api-access-8q7bw\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957459 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957558 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957647 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957754 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-client\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957851 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.957921 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/82d7d022-5521-4f60-b316-4101099d58ed-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958094 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958230 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcrqc\" (UniqueName: \"kubernetes.io/projected/837cbb40-8ba1-4602-8bab-3457eec318cb-kube-api-access-jcrqc\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958312 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-config\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958385 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-config\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958458 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-image-import-ca\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958584 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-encryption-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958663 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958765 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958887 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.958982 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-auth-proxy-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959087 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959161 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-audit-dir\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " 
pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959329 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959424 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-policies\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959505 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-audit\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959580 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rhb6\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959654 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-serving-cert\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: E1124 17:50:52.959724 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.459708135 +0000 UTC m=+142.700449519 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959758 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66k2k\" (UniqueName: \"kubernetes.io/projected/82d7d022-5521-4f60-b316-4101099d58ed-kube-api-access-66k2k\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959782 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck94w\" (UniqueName: \"kubernetes.io/projected/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-kube-api-access-ck94w\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959826 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-service-ca\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959851 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959869 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.959954 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1c6bb39-a1f1-482a-bc94-824696f2a88b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960041 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c7xw\" (UniqueName: \"kubernetes.io/projected/b1bc1040-fa30-45f9-ab55-54673b3536a2-kube-api-access-9c7xw\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 
17:50:52.960114 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7vj2\" (UniqueName: \"kubernetes.io/projected/d26d5c3e-d8c6-4460-9b45-1a2c45971be5-kube-api-access-z7vj2\") pod \"downloads-7954f5f757-fsc9b\" (UID: \"d26d5c3e-d8c6-4460-9b45-1a2c45971be5\") " pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960174 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qtb5\" (UniqueName: \"kubernetes.io/projected/4e200aa7-865a-4eec-8a41-f2340b878ccc-kube-api-access-5qtb5\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960225 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnqtk\" (UniqueName: \"kubernetes.io/projected/1fca2414-ce67-496f-a33b-b120986eba4c-kube-api-access-bnqtk\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960270 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960320 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jx8r\" (UniqueName: \"kubernetes.io/projected/89cda856-39cd-46fa-9efe-3df4eb1de216-kube-api-access-9jx8r\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960365 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-serving-ca\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960461 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-trusted-ca-bundle\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960551 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-encryption-config\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960624 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960702 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-machine-approver-tls\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960781 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fd114bf-9629-4462-a3cd-8e856e111e2f-config\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960878 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1c6bb39-a1f1-482a-bc94-824696f2a88b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.960955 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837cbb40-8ba1-4602-8bab-3457eec318cb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.961040 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.961120 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e200aa7-865a-4eec-8a41-f2340b878ccc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.961190 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 
17:50:52.961266 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-client\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.961340 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962013 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2f2e1f-429f-4a8b-9556-26910294ab6e-serving-cert\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962117 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-dir\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962198 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgvjb\" (UniqueName: \"kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962299 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lshph\" (UniqueName: \"kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962384 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-oauth-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962456 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-serving-cert\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962531 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"console-config\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962601 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-node-pullsecrets\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962673 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcjjc\" (UniqueName: \"kubernetes.io/projected/7fd114bf-9629-4462-a3cd-8e856e111e2f-kube-api-access-vcjjc\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962815 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.962903 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-client\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.963040 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.963123 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fd114bf-9629-4462-a3cd-8e856e111e2f-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:52 crc kubenswrapper[4702]: I1124 17:50:52.992718 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.028403 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.063680 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.063999 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837cbb40-8ba1-4602-8bab-3457eec318cb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064051 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-client\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064085 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064122 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2f2e1f-429f-4a8b-9556-26910294ab6e-serving-cert\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064164 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5m6v\" (UniqueName: \"kubernetes.io/projected/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-kube-api-access-s5m6v\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064200 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064923 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-dir\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064958 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f870073-6820-465d-9c8c-9d6a39c46b3d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.064996 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lshph\" (UniqueName: \"kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065022 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-stats-auth\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065045 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdjm6\" (UniqueName: \"kubernetes.io/projected/4b165c9e-1c64-4ecd-985a-c345cdb468f2-kube-api-access-pdjm6\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065084 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbsj6\" (UniqueName: \"kubernetes.io/projected/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-kube-api-access-nbsj6\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065105 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-registration-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065130 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hffn\" (UniqueName: \"kubernetes.io/projected/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-kube-api-access-5hffn\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065152 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065194 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065237 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-client\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065259 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnhcj\" (UniqueName: \"kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065282 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-key\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065303 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d5c689a-2e2c-4938-8321-488b25ffab64-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065334 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-trusted-ca\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065356 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/47b72971-05dc-4099-8e83-04ec202d36a6-service-ca-bundle\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065389 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgfcs\" (UniqueName: \"kubernetes.io/projected/47b72971-05dc-4099-8e83-04ec202d36a6-kube-api-access-sgfcs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065415 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-node-bootstrap-token\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065439 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-mountpoint-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065500 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-profile-collector-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065530 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-service-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065554 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q7bw\" (UniqueName: \"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-kube-api-access-8q7bw\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065613 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065640 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-config\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065664 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccfef0a8-abca-4437-8726-a1ddd184a798-trusted-ca\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065691 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates\") pod 
\"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065716 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-apiservice-cert\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065738 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4k7v\" (UniqueName: \"kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065788 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065832 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/82d7d022-5521-4f60-b316-4101099d58ed-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065864 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065892 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c5416587-18c5-467b-99a9-f3bf9ba3c01e-metrics-tls\") pod \"dns-operator-744455d44c-78ghc\" (UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065964 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-socket-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.065992 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-config\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: 
I1124 17:50:53.066012 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-encryption-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066034 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-proxy-tls\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066052 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ccfef0a8-abca-4437-8726-a1ddd184a798-metrics-tls\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066073 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-config\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066094 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066119 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-plugins-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066152 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5c689a-2e2c-4938-8321-488b25ffab64-config\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066171 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b165c9e-1c64-4ecd-985a-c345cdb468f2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066190 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" 
(UniqueName: \"kubernetes.io/configmap/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-cabundle\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066213 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/275fc4d3-9726-42db-b4a0-6ce515b6f66f-cert\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066232 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-tmpfs\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066253 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f870073-6820-465d-9c8c-9d6a39c46b3d-config\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066279 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066305 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-audit\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066340 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-policies\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066363 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rhb6\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066387 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck94w\" (UniqueName: \"kubernetes.io/projected/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-kube-api-access-ck94w\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc 
kubenswrapper[4702]: I1124 17:50:53.066412 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-service-ca\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066435 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlw4m\" (UniqueName: \"kubernetes.io/projected/c5416587-18c5-467b-99a9-f3bf9ba3c01e-kube-api-access-rlw4m\") pod \"dns-operator-744455d44c-78ghc\" (UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066460 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066486 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66k2k\" (UniqueName: \"kubernetes.io/projected/82d7d022-5521-4f60-b316-4101099d58ed-kube-api-access-66k2k\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066513 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c7xw\" (UniqueName: \"kubernetes.io/projected/b1bc1040-fa30-45f9-ab55-54673b3536a2-kube-api-access-9c7xw\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066538 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066581 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7vj2\" (UniqueName: \"kubernetes.io/projected/d26d5c3e-d8c6-4460-9b45-1a2c45971be5-kube-api-access-z7vj2\") pod \"downloads-7954f5f757-fsc9b\" (UID: \"d26d5c3e-d8c6-4460-9b45-1a2c45971be5\") " pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066605 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066628 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnqtk\" (UniqueName: 
\"kubernetes.io/projected/1fca2414-ce67-496f-a33b-b120986eba4c-kube-api-access-bnqtk\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066652 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-trusted-ca-bundle\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066692 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-default-certificate\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066720 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-profile-collector-cert\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066753 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-encryption-config\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066779 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fd114bf-9629-4462-a3cd-8e856e111e2f-config\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066831 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n8f6\" (UniqueName: \"kubernetes.io/projected/be7d235a-3711-426b-a518-8937496a4db1-kube-api-access-8n8f6\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066916 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1c6bb39-a1f1-482a-bc94-824696f2a88b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066946 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3aed5125-9645-4a10-899e-175356a63e8e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" 
(UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.066972 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.067740 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.067744 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-client\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.067750 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068365 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e200aa7-865a-4eec-8a41-f2340b878ccc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068402 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-srv-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068426 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-csi-data-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068454 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-srv-cert\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068479 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-mw96h\" (UniqueName: \"kubernetes.io/projected/9af41043-704d-4ab0-bd1e-f41bb5cd9a8c-kube-api-access-mw96h\") pod \"migrator-59844c95c7-x62jl\" (UID: \"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068505 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgk9b\" (UniqueName: \"kubernetes.io/projected/bdf5e1ee-978b-4c9e-964d-da16704faf96-kube-api-access-kgk9b\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068530 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgvjb\" (UniqueName: \"kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068559 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-oauth-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068585 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-serving-cert\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068607 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068631 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-images\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068656 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3d5c689a-2e2c-4938-8321-488b25ffab64-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068681 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/4b165c9e-1c64-4ecd-985a-c345cdb468f2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068707 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcjjc\" (UniqueName: \"kubernetes.io/projected/7fd114bf-9629-4462-a3cd-8e856e111e2f-kube-api-access-vcjjc\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068737 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-service-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068742 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-node-pullsecrets\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068789 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-node-pullsecrets\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068875 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068991 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069391 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-audit\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069405 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxbxt\" (UniqueName: \"kubernetes.io/projected/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-kube-api-access-qxbxt\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069439 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069466 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bdf5e1ee-978b-4c9e-964d-da16704faf96-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069497 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069523 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fd114bf-9629-4462-a3cd-8e856e111e2f-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069567 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/837cbb40-8ba1-4602-8bab-3457eec318cb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069576 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-service-ca\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069593 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069622 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e200aa7-865a-4eec-8a41-f2340b878ccc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069647 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k2mk\" (UniqueName: \"kubernetes.io/projected/ae2f2e1f-429f-4a8b-9556-26910294ab6e-kube-api-access-7k2mk\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069670 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-oauth-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069694 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-metrics-certs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069718 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f870073-6820-465d-9c8c-9d6a39c46b3d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069741 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z4bh\" (UniqueName: \"kubernetes.io/projected/7d4414e1-85cd-43a7-a232-8f4b285cd09c-kube-api-access-9z4bh\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069788 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-images\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069838 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-serving-cert\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069863 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069885 4702 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-config\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069909 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgxx4\" (UniqueName: \"kubernetes.io/projected/65952c6c-017f-4092-b799-6ce4cda03518-kube-api-access-sgxx4\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069931 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr466\" (UniqueName: \"kubernetes.io/projected/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-kube-api-access-sr466\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069973 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kczwd\" (UniqueName: \"kubernetes.io/projected/e4929a7f-bea6-4417-94f8-3dfdb325719d-kube-api-access-kczwd\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.069995 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070019 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-webhook-cert\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070064 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070087 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-config-volume\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070115 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2whsf\" (UniqueName: \"kubernetes.io/projected/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-kube-api-access-2whsf\") 
pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070123 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-policies\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070143 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-client\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070190 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070241 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcrqc\" (UniqueName: \"kubernetes.io/projected/837cbb40-8ba1-4602-8bab-3457eec318cb-kube-api-access-jcrqc\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070268 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-image-import-ca\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070292 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-bound-sa-token\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070320 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070355 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlqnw\" (UniqueName: \"kubernetes.io/projected/3aed5125-9645-4a10-899e-175356a63e8e-kube-api-access-xlqnw\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" (UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070384 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070409 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brslv\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-kube-api-access-brslv\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070441 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-auth-proxy-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070467 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-audit-dir\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070491 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070533 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-serving-cert\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070587 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-serving-cert\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070615 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-config\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.070624 4702 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.570600503 +0000 UTC m=+142.811341907 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070724 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-metrics-tls\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070760 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070783 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1c6bb39-a1f1-482a-bc94-824696f2a88b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070847 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qtb5\" (UniqueName: \"kubernetes.io/projected/4e200aa7-865a-4eec-8a41-f2340b878ccc-kube-api-access-5qtb5\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070877 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65952c6c-017f-4092-b799-6ce4cda03518-proxy-tls\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070914 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jx8r\" (UniqueName: \"kubernetes.io/projected/89cda856-39cd-46fa-9efe-3df4eb1de216-kube-api-access-9jx8r\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070943 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-serving-ca\") 
pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.070969 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071003 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071026 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96fnb\" (UniqueName: \"kubernetes.io/projected/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-kube-api-access-96fnb\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071049 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp8k7\" (UniqueName: \"kubernetes.io/projected/275fc4d3-9726-42db-b4a0-6ce515b6f66f-kube-api-access-mp8k7\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071079 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gvrz\" (UniqueName: \"kubernetes.io/projected/544e3fcf-4e43-4ace-a2ce-f14c43862794-kube-api-access-9gvrz\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071126 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-machine-approver-tls\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071182 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-certs\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.071303 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e4929a7f-bea6-4417-94f8-3dfdb325719d-audit-dir\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.068506 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-config\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.080632 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e4929a7f-bea6-4417-94f8-3dfdb325719d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.080877 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-serving-cert\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.081993 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-machine-approver-tls\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.082318 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2f2e1f-429f-4a8b-9556-26910294ab6e-serving-cert\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.082409 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-trusted-ca-bundle\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.082765 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e200aa7-865a-4eec-8a41-f2340b878ccc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.082920 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-client\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.083030 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-etcd-serving-ca\") pod \"apiserver-76f77b778f-k4zhk\" (UID: 
\"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.083503 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1fca2414-ce67-496f-a33b-b120986eba4c-encryption-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.083531 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.083719 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.084003 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.083793 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.084559 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.085192 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-oauth-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.085210 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837cbb40-8ba1-4602-8bab-3457eec318cb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.085216 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.085199 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.085813 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1c6bb39-a1f1-482a-bc94-824696f2a88b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.086028 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.087185 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.087509 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-auth-proxy-config\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.087623 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-image-import-ca\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.087856 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-trusted-ca\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.087938 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-ca\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.088522 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/1fca2414-ce67-496f-a33b-b120986eba4c-audit-dir\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.088667 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/837cbb40-8ba1-4602-8bab-3457eec318cb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.089339 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-console-config\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.089464 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.090057 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-etcd-client\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.091004 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.092269 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/82d7d022-5521-4f60-b316-4101099d58ed-images\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.093475 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fd114bf-9629-4462-a3cd-8e856e111e2f-config\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.095311 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e200aa7-865a-4eec-8a41-f2340b878ccc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc 
kubenswrapper[4702]: I1124 17:50:53.096089 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.096222 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1c6bb39-a1f1-482a-bc94-824696f2a88b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.096753 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2f2e1f-429f-4a8b-9556-26910294ab6e-config\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.096817 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fca2414-ce67-496f-a33b-b120986eba4c-config\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.097472 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89cda856-39cd-46fa-9efe-3df4eb1de216-serving-cert\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.098138 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.098143 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-encryption-config\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.098290 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.098305 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b1bc1040-fa30-45f9-ab55-54673b3536a2-oauth-serving-cert\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.099785 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.100792 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/82d7d022-5521-4f60-b316-4101099d58ed-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.101517 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4929a7f-bea6-4417-94f8-3dfdb325719d-serving-cert\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.107563 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fd114bf-9629-4462-a3cd-8e856e111e2f-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.108537 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck94w\" (UniqueName: \"kubernetes.io/projected/ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50-kube-api-access-ck94w\") pod \"machine-approver-56656f9798-rx8zc\" (UID: \"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.118423 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66k2k\" (UniqueName: \"kubernetes.io/projected/82d7d022-5521-4f60-b316-4101099d58ed-kube-api-access-66k2k\") pod \"machine-api-operator-5694c8668f-wq4v5\" (UID: \"82d7d022-5521-4f60-b316-4101099d58ed\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.137458 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c7xw\" (UniqueName: \"kubernetes.io/projected/b1bc1040-fa30-45f9-ab55-54673b3536a2-kube-api-access-9c7xw\") pod \"console-f9d7485db-67bvv\" (UID: \"b1bc1040-fa30-45f9-ab55-54673b3536a2\") " pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.159547 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8rhb6\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172234 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-csi-data-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172269 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgk9b\" (UniqueName: \"kubernetes.io/projected/bdf5e1ee-978b-4c9e-964d-da16704faf96-kube-api-access-kgk9b\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172294 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-srv-cert\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172310 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw96h\" (UniqueName: \"kubernetes.io/projected/9af41043-704d-4ab0-bd1e-f41bb5cd9a8c-kube-api-access-mw96h\") pod \"migrator-59844c95c7-x62jl\" (UID: \"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172328 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-images\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172343 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3d5c689a-2e2c-4938-8321-488b25ffab64-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172363 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b165c9e-1c64-4ecd-985a-c345cdb468f2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172386 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxbxt\" (UniqueName: 
\"kubernetes.io/projected/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-kube-api-access-qxbxt\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172402 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172418 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bdf5e1ee-978b-4c9e-964d-da16704faf96-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172443 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z4bh\" (UniqueName: \"kubernetes.io/projected/7d4414e1-85cd-43a7-a232-8f4b285cd09c-kube-api-access-9z4bh\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172474 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-metrics-certs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172491 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f870073-6820-465d-9c8c-9d6a39c46b3d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172508 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgxx4\" (UniqueName: \"kubernetes.io/projected/65952c6c-017f-4092-b799-6ce4cda03518-kube-api-access-sgxx4\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172526 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr466\" (UniqueName: \"kubernetes.io/projected/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-kube-api-access-sr466\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172543 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-webhook-cert\") pod 
\"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172595 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-config-volume\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172618 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2whsf\" (UniqueName: \"kubernetes.io/projected/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-kube-api-access-2whsf\") pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172636 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172652 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-bound-sa-token\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172677 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlqnw\" (UniqueName: \"kubernetes.io/projected/3aed5125-9645-4a10-899e-175356a63e8e-kube-api-access-xlqnw\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" (UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172694 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brslv\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-kube-api-access-brslv\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172710 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172726 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-serving-cert\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172741 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-metrics-tls\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172764 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65952c6c-017f-4092-b799-6ce4cda03518-proxy-tls\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172782 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172827 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp8k7\" (UniqueName: \"kubernetes.io/projected/275fc4d3-9726-42db-b4a0-6ce515b6f66f-kube-api-access-mp8k7\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172851 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gvrz\" (UniqueName: \"kubernetes.io/projected/544e3fcf-4e43-4ace-a2ce-f14c43862794-kube-api-access-9gvrz\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172868 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96fnb\" (UniqueName: \"kubernetes.io/projected/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-kube-api-access-96fnb\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172885 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-certs\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172903 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5m6v\" (UniqueName: \"kubernetes.io/projected/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-kube-api-access-s5m6v\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172917 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f870073-6820-465d-9c8c-9d6a39c46b3d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172934 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdjm6\" (UniqueName: \"kubernetes.io/projected/4b165c9e-1c64-4ecd-985a-c345cdb468f2-kube-api-access-pdjm6\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172938 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-csi-data-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.172955 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-stats-auth\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173019 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-registration-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173045 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbsj6\" (UniqueName: \"kubernetes.io/projected/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-kube-api-access-nbsj6\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173067 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hffn\" (UniqueName: \"kubernetes.io/projected/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-kube-api-access-5hffn\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173082 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173116 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: 
\"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173136 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnhcj\" (UniqueName: \"kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173162 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-key\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173180 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d5c689a-2e2c-4938-8321-488b25ffab64-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173217 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgfcs\" (UniqueName: \"kubernetes.io/projected/47b72971-05dc-4099-8e83-04ec202d36a6-kube-api-access-sgfcs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173239 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-node-bootstrap-token\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173256 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-mountpoint-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173276 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/47b72971-05dc-4099-8e83-04ec202d36a6-service-ca-bundle\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173294 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-profile-collector-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173321 4702 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-config\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173337 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccfef0a8-abca-4437-8726-a1ddd184a798-trusted-ca\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173361 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-apiservice-cert\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173379 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4k7v\" (UniqueName: \"kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173408 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c5416587-18c5-467b-99a9-f3bf9ba3c01e-metrics-tls\") pod \"dns-operator-744455d44c-78ghc\" (UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173426 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-socket-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173445 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-proxy-tls\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173461 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ccfef0a8-abca-4437-8726-a1ddd184a798-metrics-tls\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173481 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5c689a-2e2c-4938-8321-488b25ffab64-config\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: 
\"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173500 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b165c9e-1c64-4ecd-985a-c345cdb468f2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173529 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-plugins-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173554 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-tmpfs\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173575 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f870073-6820-465d-9c8c-9d6a39c46b3d-config\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173594 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-cabundle\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173614 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/275fc4d3-9726-42db-b4a0-6ce515b6f66f-cert\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173637 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173664 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlw4m\" (UniqueName: \"kubernetes.io/projected/c5416587-18c5-467b-99a9-f3bf9ba3c01e-kube-api-access-rlw4m\") pod \"dns-operator-744455d44c-78ghc\" (UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173688 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173744 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-default-certificate\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173768 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-profile-collector-cert\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173823 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n8f6\" (UniqueName: \"kubernetes.io/projected/be7d235a-3711-426b-a518-8937496a4db1-kube-api-access-8n8f6\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173847 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3aed5125-9645-4a10-899e-175356a63e8e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" (UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.173884 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-srv-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.175970 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-tmpfs\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.176082 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccfef0a8-abca-4437-8726-a1ddd184a798-trusted-ca\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.178276 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f870073-6820-465d-9c8c-9d6a39c46b3d-config\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.178329 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.178612 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-registration-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.179325 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.179779 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-stats-auth\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.179926 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-config-volume\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.180965 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-srv-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.181060 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-plugins-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.180977 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b165c9e-1c64-4ecd-985a-c345cdb468f2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.181621 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-srv-cert\") pod 
\"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.182373 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.182407 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65952c6c-017f-4092-b799-6ce4cda03518-images\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.182970 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-cabundle\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.183140 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgvjb\" (UniqueName: \"kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb\") pod \"controller-manager-879f6c89f-sjf4l\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.183173 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.183361 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-socket-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.183492 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ccfef0a8-abca-4437-8726-a1ddd184a798-metrics-tls\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.183561 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.683540637 +0000 UTC m=+142.924282031 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.183678 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/be7d235a-3711-426b-a518-8937496a4db1-mountpoint-dir\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.184542 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5c689a-2e2c-4938-8321-488b25ffab64-config\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.186854 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-config\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.188196 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/544e3fcf-4e43-4ace-a2ce-f14c43862794-profile-collector-cert\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.189446 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f870073-6820-465d-9c8c-9d6a39c46b3d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.190392 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/275fc4d3-9726-42db-b4a0-6ce515b6f66f-cert\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.191034 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3aed5125-9645-4a10-899e-175356a63e8e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" (UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.193011 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/47b72971-05dc-4099-8e83-04ec202d36a6-service-ca-bundle\") pod 
\"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.193561 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b165c9e-1c64-4ecd-985a-c345cdb468f2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.193944 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65952c6c-017f-4092-b799-6ce4cda03518-proxy-tls\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.194329 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-metrics-certs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.194714 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.195166 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-serving-cert\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.199181 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bdf5e1ee-978b-4c9e-964d-da16704faf96-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.200172 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-certs\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.200638 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7vj2\" (UniqueName: \"kubernetes.io/projected/d26d5c3e-d8c6-4460-9b45-1a2c45971be5-kube-api-access-z7vj2\") pod \"downloads-7954f5f757-fsc9b\" (UID: \"d26d5c3e-d8c6-4460-9b45-1a2c45971be5\") " pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 
17:50:53.201177 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.201863 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.202091 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7d4414e1-85cd-43a7-a232-8f4b285cd09c-profile-collector-cert\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.202563 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.203408 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-apiservice-cert\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.204217 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.204756 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-metrics-tls\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.204983 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/47b72971-05dc-4099-8e83-04ec202d36a6-default-certificate\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.205229 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d5c689a-2e2c-4938-8321-488b25ffab64-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.205400 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c5416587-18c5-467b-99a9-f3bf9ba3c01e-metrics-tls\") pod \"dns-operator-744455d44c-78ghc\" 
(UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.205465 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-signing-key\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.205695 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-webhook-cert\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.211647 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-proxy-tls\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.213933 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-node-bootstrap-token\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: W1124 17:50:53.221564 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b1e5e1b_4ea3_4481_9bdc_f24a7689775a.slice/crio-1c5eb18a9e1fa6a44aa65baee5f8fac9077e55b015498bf01189a6d4b980f372 WatchSource:0}: Error finding container 1c5eb18a9e1fa6a44aa65baee5f8fac9077e55b015498bf01189a6d4b980f372: Status 404 returned error can't find the container with id 1c5eb18a9e1fa6a44aa65baee5f8fac9077e55b015498bf01189a6d4b980f372 Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.222355 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnqtk\" (UniqueName: \"kubernetes.io/projected/1fca2414-ce67-496f-a33b-b120986eba4c-kube-api-access-bnqtk\") pod \"apiserver-76f77b778f-k4zhk\" (UID: \"1fca2414-ce67-496f-a33b-b120986eba4c\") " pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.226757 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jzpbc"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.236589 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q7bw\" (UniqueName: \"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-kube-api-access-8q7bw\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.259431 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/d1c6bb39-a1f1-482a-bc94-824696f2a88b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dvrvn\" (UID: \"d1c6bb39-a1f1-482a-bc94-824696f2a88b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.275592 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.276161 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.776143674 +0000 UTC m=+143.016884838 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.285168 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" event={"ID":"2e74aff5-38e7-42fb-b2dc-172662221443","Type":"ContainerStarted","Data":"629b5fffdcd9c17270373fb2310ab8f3cc2e3e62fb0eb3f06640de095104ccd4"} Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.285889 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.288985 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qtb5\" (UniqueName: \"kubernetes.io/projected/4e200aa7-865a-4eec-8a41-f2340b878ccc-kube-api-access-5qtb5\") pod \"openshift-controller-manager-operator-756b6f6bc6-bf65j\" (UID: \"4e200aa7-865a-4eec-8a41-f2340b878ccc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.293215 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" event={"ID":"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a","Type":"ContainerStarted","Data":"1c5eb18a9e1fa6a44aa65baee5f8fac9077e55b015498bf01189a6d4b980f372"} Nov 24 17:50:53 crc kubenswrapper[4702]: W1124 17:50:53.297649 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3ed1886_2cab_466f_a497_c6d8faa7955f.slice/crio-6460374517a54fce0d42c6c69e04f7645f19d188910de382de9e80ea2d32861f WatchSource:0}: Error finding container 6460374517a54fce0d42c6c69e04f7645f19d188910de382de9e80ea2d32861f: Status 404 returned error can't find the container with id 6460374517a54fce0d42c6c69e04f7645f19d188910de382de9e80ea2d32861f Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.298782 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9jx8r\" (UniqueName: \"kubernetes.io/projected/89cda856-39cd-46fa-9efe-3df4eb1de216-kube-api-access-9jx8r\") pod \"etcd-operator-b45778765-hw6dk\" (UID: \"89cda856-39cd-46fa-9efe-3df4eb1de216\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.302351 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.318545 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.319123 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc"] Nov 24 17:50:53 crc kubenswrapper[4702]: W1124 17:50:53.333930 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab08f801_30f3_4c34_9bfc_bbfcb3c2ef50.slice/crio-c949fab74149332b2f7f2bba57e1bc7d26a8e9981848eaa52beb3737e9fb4775 WatchSource:0}: Error finding container c949fab74149332b2f7f2bba57e1bc7d26a8e9981848eaa52beb3737e9fb4775: Status 404 returned error can't find the container with id c949fab74149332b2f7f2bba57e1bc7d26a8e9981848eaa52beb3737e9fb4775 Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.335950 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcrqc\" (UniqueName: \"kubernetes.io/projected/837cbb40-8ba1-4602-8bab-3457eec318cb-kube-api-access-jcrqc\") pod \"openshift-config-operator-7777fb866f-4nf8m\" (UID: \"837cbb40-8ba1-4602-8bab-3457eec318cb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.337382 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.345928 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.357330 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kczwd\" (UniqueName: \"kubernetes.io/projected/e4929a7f-bea6-4417-94f8-3dfdb325719d-kube-api-access-kczwd\") pod \"apiserver-7bbb656c7d-k44cv\" (UID: \"e4929a7f-bea6-4417-94f8-3dfdb325719d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.365510 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.372864 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.376074 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wq4v5"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.377937 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.378259 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.878246627 +0000 UTC m=+143.118987791 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.382866 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcjjc\" (UniqueName: \"kubernetes.io/projected/7fd114bf-9629-4462-a3cd-8e856e111e2f-kube-api-access-vcjjc\") pod \"openshift-apiserver-operator-796bbdcf4f-42v6m\" (UID: \"7fd114bf-9629-4462-a3cd-8e856e111e2f\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.391088 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.397258 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k2mk\" (UniqueName: \"kubernetes.io/projected/ae2f2e1f-429f-4a8b-9556-26910294ab6e-kube-api-access-7k2mk\") pod \"console-operator-58897d9998-krcnx\" (UID: \"ae2f2e1f-429f-4a8b-9556-26910294ab6e\") " pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.418167 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.426648 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lshph\" (UniqueName: \"kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph\") pod \"route-controller-manager-6576b87f9c-t7l4h\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.445420 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.469991 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.474871 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxbxt\" (UniqueName: \"kubernetes.io/projected/5bed465a-fc34-4e16-a5ca-6ec3fba92edc-kube-api-access-qxbxt\") pod \"service-ca-9c57cc56f-s65mg\" (UID: \"5bed465a-fc34-4e16-a5ca-6ec3fba92edc\") " pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.478680 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.479099 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:53.979067001 +0000 UTC m=+143.219808265 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.479351 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.480129 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 17:50:53.980118685 +0000 UTC m=+143.220859849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.481529 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4k7v\" (UniqueName: \"kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v\") pod \"marketplace-operator-79b997595-zs4n4\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.496348 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgk9b\" (UniqueName: \"kubernetes.io/projected/bdf5e1ee-978b-4c9e-964d-da16704faf96-kube-api-access-kgk9b\") pod \"package-server-manager-789f6589d5-9pcn8\" (UID: \"bdf5e1ee-978b-4c9e-964d-da16704faf96\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.496641 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.516577 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlw4m\" (UniqueName: \"kubernetes.io/projected/c5416587-18c5-467b-99a9-f3bf9ba3c01e-kube-api-access-rlw4m\") pod \"dns-operator-744455d44c-78ghc\" (UID: \"c5416587-18c5-467b-99a9-f3bf9ba3c01e\") " pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.516920 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.521067 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.528292 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.537683 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.545355 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brslv\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-kube-api-access-brslv\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.558873 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw96h\" (UniqueName: \"kubernetes.io/projected/9af41043-704d-4ab0-bd1e-f41bb5cd9a8c-kube-api-access-mw96h\") pod \"migrator-59844c95c7-x62jl\" (UID: \"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.573627 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.573829 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fsc9b"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.580258 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.580276 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z4bh\" (UniqueName: \"kubernetes.io/projected/7d4414e1-85cd-43a7-a232-8f4b285cd09c-kube-api-access-9z4bh\") pod \"catalog-operator-68c6474976-mmlk2\" (UID: \"7d4414e1-85cd-43a7-a232-8f4b285cd09c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.580487 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.080467173 +0000 UTC m=+143.321208337 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.580736 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.581103 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.081094574 +0000 UTC m=+143.321835738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.596171 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n8f6\" (UniqueName: \"kubernetes.io/projected/be7d235a-3711-426b-a518-8937496a4db1-kube-api-access-8n8f6\") pod \"csi-hostpathplugin-vwg94\" (UID: \"be7d235a-3711-426b-a518-8937496a4db1\") " pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.619488 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.629771 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbsj6\" (UniqueName: \"kubernetes.io/projected/a2146f1c-7229-4a1f-8da8-0fca6ab8c424-kube-api-access-nbsj6\") pod \"service-ca-operator-777779d784-4l7wj\" (UID: \"a2146f1c-7229-4a1f-8da8-0fca6ab8c424\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.647609 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hffn\" (UniqueName: \"kubernetes.io/projected/a04e13e7-ff96-4153-b994-e9ccbe20b3ce-kube-api-access-5hffn\") pod \"packageserver-d55dfcdfc-4jkxd\" (UID: \"a04e13e7-ff96-4153-b994-e9ccbe20b3ce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.659888 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.663269 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gvrz\" (UniqueName: \"kubernetes.io/projected/544e3fcf-4e43-4ace-a2ce-f14c43862794-kube-api-access-9gvrz\") pod \"olm-operator-6b444d44fb-545qh\" (UID: \"544e3fcf-4e43-4ace-a2ce-f14c43862794\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.686596 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.687080 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.187063718 +0000 UTC m=+143.427804882 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.693498 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3d5c689a-2e2c-4938-8321-488b25ffab64-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-skdmk\" (UID: \"3d5c689a-2e2c-4938-8321-488b25ffab64\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.710495 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96fnb\" (UniqueName: \"kubernetes.io/projected/b28247d0-f3b1-4d4b-bed4-0af181a81dcf-kube-api-access-96fnb\") pod \"dns-default-5bsls\" (UID: \"b28247d0-f3b1-4d4b-bed4-0af181a81dcf\") " pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.713594 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.725473 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.739569 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgxx4\" (UniqueName: \"kubernetes.io/projected/65952c6c-017f-4092-b799-6ce4cda03518-kube-api-access-sgxx4\") pod \"machine-config-operator-74547568cd-dlzxp\" (UID: \"65952c6c-017f-4092-b799-6ce4cda03518\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.759209 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5m6v\" (UniqueName: \"kubernetes.io/projected/ea1bd1a9-9346-4125-9af6-4c2d68e4de12-kube-api-access-s5m6v\") pod \"machine-config-controller-84d6567774-dt4m6\" (UID: \"ea1bd1a9-9346-4125-9af6-4c2d68e4de12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.761241 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f870073-6820-465d-9c8c-9d6a39c46b3d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fdlhm\" (UID: \"4f870073-6820-465d-9c8c-9d6a39c46b3d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.781043 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2whsf\" (UniqueName: \"kubernetes.io/projected/aa55d4fd-a2ed-4b4f-acb5-03b1704de025-kube-api-access-2whsf\") pod \"control-plane-machine-set-operator-78cbb6b69f-k4vgs\" (UID: \"aa55d4fd-a2ed-4b4f-acb5-03b1704de025\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.781238 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.789266 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.789838 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.289821424 +0000 UTC m=+143.530562578 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.790102 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" Nov 24 17:50:53 crc kubenswrapper[4702]: W1124 17:50:53.797738 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod837cbb40_8ba1_4602_8bab_3457eec318cb.slice/crio-b07f821b6bf81ce16f20ece91d3500b7304d6e7d0825fa37b01805f9e98b3286 WatchSource:0}: Error finding container b07f821b6bf81ce16f20ece91d3500b7304d6e7d0825fa37b01805f9e98b3286: Status 404 returned error can't find the container with id b07f821b6bf81ce16f20ece91d3500b7304d6e7d0825fa37b01805f9e98b3286 Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.806647 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnhcj\" (UniqueName: \"kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj\") pod \"collect-profiles-29400105-wrxpp\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.814500 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.835584 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgfcs\" (UniqueName: \"kubernetes.io/projected/47b72971-05dc-4099-8e83-04ec202d36a6-kube-api-access-sgfcs\") pod \"router-default-5444994796-r5tsm\" (UID: \"47b72971-05dc-4099-8e83-04ec202d36a6\") " pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.836515 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.843204 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.849673 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlqnw\" (UniqueName: \"kubernetes.io/projected/3aed5125-9645-4a10-899e-175356a63e8e-kube-api-access-xlqnw\") pod \"multus-admission-controller-857f4d67dd-gb2f9\" (UID: \"3aed5125-9645-4a10-899e-175356a63e8e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.850719 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.858575 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.874607 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ccfef0a8-abca-4437-8726-a1ddd184a798-bound-sa-token\") pod \"ingress-operator-5b745b69d9-th6sc\" (UID: \"ccfef0a8-abca-4437-8726-a1ddd184a798\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.875232 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.875438 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.892085 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.892623 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.392581829 +0000 UTC m=+143.633323003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.893149 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.893348 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.893947 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.39392911 +0000 UTC m=+143.634670274 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.894335 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.919266 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp8k7\" (UniqueName: \"kubernetes.io/projected/275fc4d3-9726-42db-b4a0-6ce515b6f66f-kube-api-access-mp8k7\") pod \"ingress-canary-gpf8l\" (UID: \"275fc4d3-9726-42db-b4a0-6ce515b6f66f\") " pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.923508 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdjm6\" (UniqueName: \"kubernetes.io/projected/4b165c9e-1c64-4ecd-985a-c345cdb468f2-kube-api-access-pdjm6\") pod \"kube-storage-version-migrator-operator-b67b599dd-f268p\" (UID: \"4b165c9e-1c64-4ecd-985a-c345cdb468f2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.929986 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5bsls" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.933266 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr466\" (UniqueName: \"kubernetes.io/projected/b68f749c-133c-47dc-b3cc-fa2fa2e0d575-kube-api-access-sr466\") pod \"machine-config-server-9swhp\" (UID: \"b68f749c-133c-47dc-b3cc-fa2fa2e0d575\") " pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.939102 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9swhp" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.942816 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-gpf8l" Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.943492 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.954181 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.955914 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn"] Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.997080 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.997257 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.497221832 +0000 UTC m=+143.737963006 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:53 crc kubenswrapper[4702]: I1124 17:50:53.997956 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:53 crc kubenswrapper[4702]: E1124 17:50:53.998434 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.49841535 +0000 UTC m=+143.739156514 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.035557 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.067946 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.072632 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.102655 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.103002 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.60297707 +0000 UTC m=+143.843718234 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.103264 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.103968 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.603954641 +0000 UTC m=+143.844695815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.105290 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.157078 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-67bvv"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.206217 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.206434 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.706383106 +0000 UTC m=+143.947124270 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.206537 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.206914 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.706906712 +0000 UTC m=+143.947647876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.307440 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.307640 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.807613873 +0000 UTC m=+144.048355037 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.307782 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.308169 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.808160579 +0000 UTC m=+144.048901743 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.310560 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9swhp" event={"ID":"b68f749c-133c-47dc-b3cc-fa2fa2e0d575","Type":"ContainerStarted","Data":"648e16daf13bf05fcee8d53071464d86238ff12d931db97ae3ee8126ae29ef47"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.312289 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" event={"ID":"2e74aff5-38e7-42fb-b2dc-172662221443","Type":"ContainerStarted","Data":"5ca64480cc99837fafa9cf96351486b0e69defb4e1d3c1ae793f5940caa0f253"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.313650 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" event={"ID":"837cbb40-8ba1-4602-8bab-3457eec318cb","Type":"ContainerStarted","Data":"b07f821b6bf81ce16f20ece91d3500b7304d6e7d0825fa37b01805f9e98b3286"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.315902 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" event={"ID":"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a","Type":"ContainerStarted","Data":"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.315983 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.319778 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" 
event={"ID":"4e200aa7-865a-4eec-8a41-f2340b878ccc","Type":"ContainerStarted","Data":"97b57a7382b0e93d91f987d4ba6a60f6fe986542cf2b801f76c3cd33d633eb71"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.329365 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" event={"ID":"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50","Type":"ContainerStarted","Data":"11b5c87edacc50dd94176899f31f12e86b62c638e586b183cbfc84601408dd52"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.329415 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" event={"ID":"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50","Type":"ContainerStarted","Data":"c949fab74149332b2f7f2bba57e1bc7d26a8e9981848eaa52beb3737e9fb4775"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.340403 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" event={"ID":"82d7d022-5521-4f60-b316-4101099d58ed","Type":"ContainerStarted","Data":"cb4e38622e2d5cd99cd1d0e965cf2ed073cd54d96d0d290ed5d0dfa5b13e58de"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.340466 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" event={"ID":"82d7d022-5521-4f60-b316-4101099d58ed","Type":"ContainerStarted","Data":"5400373f0b717786b17c62af0ac43a300b44d6c61e57c2f9762e38114c49d31a"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.340478 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" event={"ID":"82d7d022-5521-4f60-b316-4101099d58ed","Type":"ContainerStarted","Data":"dcadf5f34b9571f2884344089bc5b69ab0045239e07552f15f10bd467b62a04c"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.342126 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-67bvv" event={"ID":"b1bc1040-fa30-45f9-ab55-54673b3536a2","Type":"ContainerStarted","Data":"77fec615f75ae812261577e88bf1ccaacb38097a95c958583e3d657e39ac7b09"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.344957 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" event={"ID":"35419044-f572-4b95-9811-46e6ab51c87a","Type":"ContainerStarted","Data":"1cce1e5fc4822a179c73c3746db1fbc366ccd26bce93deab5aa843e009c72bc0"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.345028 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" event={"ID":"35419044-f572-4b95-9811-46e6ab51c87a","Type":"ContainerStarted","Data":"7c680018b7d299d3e4abdf9730b252323fc197c07bfae5ed85b5031ed8b15721"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.345050 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" event={"ID":"35419044-f572-4b95-9811-46e6ab51c87a","Type":"ContainerStarted","Data":"d59c362bf32677a4100ef17304d59493766c9c58d7ae3dc00d2befd8981def0b"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.346319 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fsc9b" event={"ID":"d26d5c3e-d8c6-4460-9b45-1a2c45971be5","Type":"ContainerStarted","Data":"77d078c132dc2b21e8aef2df3310c14ae737a68eb15d7ba678f02328888775d1"} Nov 24 
17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.346379 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fsc9b" event={"ID":"d26d5c3e-d8c6-4460-9b45-1a2c45971be5","Type":"ContainerStarted","Data":"98dec58865a9205826a33c58ef8c78d098919c2d319dece86ef8cd2bb82f7e85"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.346950 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.351350 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.351400 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.353972 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" event={"ID":"d1c6bb39-a1f1-482a-bc94-824696f2a88b","Type":"ContainerStarted","Data":"150f49dcf0996579e4bf6ef818c31078b48555a80c0899b4bf0e93a691c0aca6"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.372789 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" event={"ID":"f3ed1886-2cab-466f-a497-c6d8faa7955f","Type":"ContainerStarted","Data":"ec5dafb44a510a6ee6acbe73d6d8ef13f596dd2e85ffabc7e0e775d7a1602cb7"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.372862 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" event={"ID":"f3ed1886-2cab-466f-a497-c6d8faa7955f","Type":"ContainerStarted","Data":"6460374517a54fce0d42c6c69e04f7645f19d188910de382de9e80ea2d32861f"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.374385 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" event={"ID":"47976e32-f007-462e-9df5-e2c674f8b73a","Type":"ContainerStarted","Data":"c9e761b2c056a832556741aa7f554befd55ffdf53efb911f578c38f9e3aedc0e"} Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.409393 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.409615 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.909578452 +0000 UTC m=+144.150319626 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.410052 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.412060 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:54.912046429 +0000 UTC m=+144.152787593 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.526033 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.526512 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.026483059 +0000 UTC m=+144.267224223 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.526901 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.529534 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.029511754 +0000 UTC m=+144.270252918 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.628302 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.629255 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.129221534 +0000 UTC m=+144.369962698 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.645040 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.655815 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s65mg"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.675031 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-krcnx"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.677693 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.680994 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.689366 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hw6dk"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.691810 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.713608 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k4zhk"] Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.731106 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.731823 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.231778051 +0000 UTC m=+144.472519205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.772371 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:50:54 crc kubenswrapper[4702]: W1124 17:50:54.820357 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bed465a_fc34_4e16_a5ca_6ec3fba92edc.slice/crio-f62839877faea34976b87db4d4157cdd899c1d5cc000d9ef51cd893b1211246e WatchSource:0}: Error finding container f62839877faea34976b87db4d4157cdd899c1d5cc000d9ef51cd893b1211246e: Status 404 returned error can't find the container with id f62839877faea34976b87db4d4157cdd899c1d5cc000d9ef51cd893b1211246e Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.822701 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-fsc9b" podStartSLOduration=123.822683306 podStartE2EDuration="2m3.822683306s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:54.820515098 +0000 UTC m=+144.061256292" watchObservedRunningTime="2025-11-24 17:50:54.822683306 +0000 UTC m=+144.063424460" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.833381 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.833735 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.333718101 +0000 UTC m=+144.574459265 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.958694 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:54 crc kubenswrapper[4702]: E1124 17:50:54.959128 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.459112314 +0000 UTC m=+144.699853478 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.963536 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vpwsc" podStartSLOduration=123.963508821 podStartE2EDuration="2m3.963508821s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:54.958076621 +0000 UTC m=+144.198817795" watchObservedRunningTime="2025-11-24 17:50:54.963508821 +0000 UTC m=+144.204249985" Nov 24 17:50:54 crc kubenswrapper[4702]: I1124 17:50:54.964077 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-jzpbc" podStartSLOduration=123.964069258 podStartE2EDuration="2m3.964069258s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:54.88100531 +0000 UTC m=+144.121746464" watchObservedRunningTime="2025-11-24 17:50:54.964069258 +0000 UTC m=+144.204810422" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.060154 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.060474 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.560458614 +0000 UTC m=+144.801199778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.102401 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.139473 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-wq4v5" podStartSLOduration=124.139455365 podStartE2EDuration="2m4.139455365s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.13770937 +0000 UTC m=+144.378450554" watchObservedRunningTime="2025-11-24 17:50:55.139455365 +0000 UTC m=+144.380196519" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.161352 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.161698 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.661686131 +0000 UTC m=+144.902427295 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.178524 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-78ghc"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.257540 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.262502 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.263238 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.263824 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.763786424 +0000 UTC m=+145.004527588 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.264687 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-vwg94"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.281986 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.285555 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh"] Nov 24 17:50:55 crc kubenswrapper[4702]: W1124 17:50:55.285717 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa55d4fd_a2ed_4b4f_acb5_03b1704de025.slice/crio-69d02e2c7aa241b1a4d1a3ae79d8bb9559dab39142f1f15ad5c932108a89c56f WatchSource:0}: Error finding container 69d02e2c7aa241b1a4d1a3ae79d8bb9559dab39142f1f15ad5c932108a89c56f: Status 404 returned error can't find the container with id 69d02e2c7aa241b1a4d1a3ae79d8bb9559dab39142f1f15ad5c932108a89c56f Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.290351 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.291115 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.303699 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.307576 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.317977 4702 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-n2bxq container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.318049 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.326374 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.364670 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.365409 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.865393884 +0000 UTC m=+145.106135048 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.373609 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" podStartSLOduration=124.373581579 podStartE2EDuration="2m4.373581579s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.366452396 +0000 UTC m=+144.607193580" watchObservedRunningTime="2025-11-24 17:50:55.373581579 +0000 UTC m=+144.614322763" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.430736 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.442482 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-gpf8l"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.458887 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.463087 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" event={"ID":"5bed465a-fc34-4e16-a5ca-6ec3fba92edc","Type":"ContainerStarted","Data":"dce6b8e2a77b7a7ed6f50a79e3d91d7286e52d7f5313eb2d6ba59f7d4b72e821"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.463160 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" event={"ID":"5bed465a-fc34-4e16-a5ca-6ec3fba92edc","Type":"ContainerStarted","Data":"f62839877faea34976b87db4d4157cdd899c1d5cc000d9ef51cd893b1211246e"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.465950 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.467836 4702 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:55.967772246 +0000 UTC m=+145.208513410 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.471360 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" event={"ID":"ab08f801-30f3-4c34-9bfc-bbfcb3c2ef50","Type":"ContainerStarted","Data":"2816e694af2ccb1a3e8c587a1214707b07be8e71742bbbc3369dfc8766640ac5"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.484051 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" event={"ID":"c5416587-18c5-467b-99a9-f3bf9ba3c01e","Type":"ContainerStarted","Data":"aa8777876fdf1a757021291323a52adf699e8fc93b93302f25eaa2800b43772b"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.491297 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.491954 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" event={"ID":"aa55d4fd-a2ed-4b4f-acb5-03b1704de025","Type":"ContainerStarted","Data":"69d02e2c7aa241b1a4d1a3ae79d8bb9559dab39142f1f15ad5c932108a89c56f"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.496223 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-67bvv" event={"ID":"b1bc1040-fa30-45f9-ab55-54673b3536a2","Type":"ContainerStarted","Data":"9fed2ae91eb4815fefac22e207e7b3f809b23ea5d5cc7b91cf4598aa62523e51"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.500080 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" event={"ID":"bdf5e1ee-978b-4c9e-964d-da16704faf96","Type":"ContainerStarted","Data":"19d98bcbecdea6edf2137d7a27d910f96050d18fcb64e9559286f74fa38ce84c"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.500127 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" event={"ID":"bdf5e1ee-978b-4c9e-964d-da16704faf96","Type":"ContainerStarted","Data":"96e657ec472af55db3c051283e61aae9361778b080495a20520dc6ddc0ff320f"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.502227 4702 generic.go:334] "Generic (PLEG): container finished" podID="837cbb40-8ba1-4602-8bab-3457eec318cb" containerID="63367ef432b5a1d5579e6a1e5d9af480da38dc7eca76199e83406a75330d23ac" exitCode=0 Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.502273 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" 
event={"ID":"837cbb40-8ba1-4602-8bab-3457eec318cb","Type":"ContainerDied","Data":"63367ef432b5a1d5579e6a1e5d9af480da38dc7eca76199e83406a75330d23ac"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.502963 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5bsls"] Nov 24 17:50:55 crc kubenswrapper[4702]: W1124 17:50:55.504699 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b165c9e_1c64_4ecd_985a_c345cdb468f2.slice/crio-1677cf08510feb7459d4b119f3299b8d52d2ff5ed79296b9c8b4633f683d4a31 WatchSource:0}: Error finding container 1677cf08510feb7459d4b119f3299b8d52d2ff5ed79296b9c8b4633f683d4a31: Status 404 returned error can't find the container with id 1677cf08510feb7459d4b119f3299b8d52d2ff5ed79296b9c8b4633f683d4a31 Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.507612 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" event={"ID":"206b06d8-9020-4e3f-b055-1a1bb10b0bcd","Type":"ContainerStarted","Data":"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.507654 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" event={"ID":"206b06d8-9020-4e3f-b055-1a1bb10b0bcd","Type":"ContainerStarted","Data":"b3633835317117a9bc6522ab889c2d5eac9aa1acf2d127b09f41bcbbd80d1b8d"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.508183 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.513149 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gb2f9"] Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.514576 4702 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zs4n4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.514624 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.531726 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" event={"ID":"89cda856-39cd-46fa-9efe-3df4eb1de216","Type":"ContainerStarted","Data":"8adfc1ca66a4e3620ac2f41086a4d12c61146600d9b7cffd3960124f843a5c93"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.542379 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-krcnx" event={"ID":"ae2f2e1f-429f-4a8b-9556-26910294ab6e","Type":"ContainerStarted","Data":"5f7ba7826886d359053f9e3add4ff196d7f7f2e3735b65846d7b335b542ed6a6"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.542437 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-krcnx" 
event={"ID":"ae2f2e1f-429f-4a8b-9556-26910294ab6e","Type":"ContainerStarted","Data":"7b00e49ecd991a48f51879835ecf5528a4639acaab8e55f30792fe5ffd5c2ff7"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.543464 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.547862 4702 patch_prober.go:28] interesting pod/console-operator-58897d9998-krcnx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.547929 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-krcnx" podUID="ae2f2e1f-429f-4a8b-9556-26910294ab6e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.548997 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-r5tsm" event={"ID":"47b72971-05dc-4099-8e83-04ec202d36a6","Type":"ContainerStarted","Data":"5533b0930d85406211734697399f5125ea2819d68dc1bc97bb018ea385ff7792"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.549043 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-r5tsm" event={"ID":"47b72971-05dc-4099-8e83-04ec202d36a6","Type":"ContainerStarted","Data":"a1f4cb2bfccbf3ef04673ed6ba3bb1a795d92781634204af22ad4f131806da37"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.565256 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" event={"ID":"1fca2414-ce67-496f-a33b-b120986eba4c","Type":"ContainerStarted","Data":"47aa0d93c9380a66c0b43a227f20e85ff7903c68190c0b12ac788716134c2d7a"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.567495 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.567612 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" event={"ID":"65952c6c-017f-4092-b799-6ce4cda03518","Type":"ContainerStarted","Data":"3ddfe09237ffa4fd5bcc8ea71532c76cabc2b20d391adde50ed53eecffb2749f"} Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.571133 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.071112339 +0000 UTC m=+145.311853723 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.575880 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" event={"ID":"7d4414e1-85cd-43a7-a232-8f4b285cd09c","Type":"ContainerStarted","Data":"597de8da5677b6cacdf2bc4ae7ec0585a0c798c9dc5d4bc72a8a8800bbc51f25"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.578288 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" event={"ID":"4e200aa7-865a-4eec-8a41-f2340b878ccc","Type":"ContainerStarted","Data":"2518e9c5be6c22a268062fbdbd2664fc885731a972ddd9524517ed3d168c549e"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.580150 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" event={"ID":"3d5c689a-2e2c-4938-8321-488b25ffab64","Type":"ContainerStarted","Data":"d23f74621ebcc47c083980022ac73b1cb2b5e0f0aedc9c7047b70c8531b12363"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.582021 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" event={"ID":"d1c6bb39-a1f1-482a-bc94-824696f2a88b","Type":"ContainerStarted","Data":"2ae113221c6a05dddcfa525a05f25e4e932db74857ddd6627f29182254bfa0e1"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.584047 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" event={"ID":"544e3fcf-4e43-4ace-a2ce-f14c43862794","Type":"ContainerStarted","Data":"443293534a1515fb7cba0f7d77b9d7b49d0b6fabf230170284e557d242130b80"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.584850 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" event={"ID":"e4929a7f-bea6-4417-94f8-3dfdb325719d","Type":"ContainerStarted","Data":"ab11382def6cb4a19524c59dc4b8512e722e2d4922c11bbaac7aa0c14d1da5ba"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.587721 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" event={"ID":"ea1bd1a9-9346-4125-9af6-4c2d68e4de12","Type":"ContainerStarted","Data":"d73f761a93cc9a45e515dce95bf4b351c0522ebd87e76513b485d270254ad64f"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.592538 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" event={"ID":"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c","Type":"ContainerStarted","Data":"d5397484befa4d0f50baec1928d7ff864430a037a8a95ffd50f5bd2c01c92175"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.595613 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" 
event={"ID":"d2a41904-b404-4a22-88e0-5d947e877ced","Type":"ContainerStarted","Data":"0e29db102f52c87bb2267735954de7f5b20e5df375bc34220e8fca4a30a50cce"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.604219 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" event={"ID":"47976e32-f007-462e-9df5-e2c674f8b73a","Type":"ContainerStarted","Data":"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.606010 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.606584 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" event={"ID":"a04e13e7-ff96-4153-b994-e9ccbe20b3ce","Type":"ContainerStarted","Data":"d0903301234be5284af866050b41afa2e62608170e1e7c78f7f40c3ef860dcd7"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.607501 4702 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-sjf4l container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.607629 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.615836 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9swhp" event={"ID":"b68f749c-133c-47dc-b3cc-fa2fa2e0d575","Type":"ContainerStarted","Data":"a23e0cdfdf386b46a705d35c04f9e045120674107e1ad13355f3c06dcaad6572"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.620364 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" event={"ID":"435d3ff9-5e63-414a-a5ea-1baf2d52c14c","Type":"ContainerStarted","Data":"ccec92838ece5841c2f51376584a918ffafa3f672295b02a0957fef3fff5fdec"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.621449 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.622343 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" event={"ID":"be7d235a-3711-426b-a518-8937496a4db1","Type":"ContainerStarted","Data":"0b4f4028f3f3fd81c8ae4756b3b099696ee6ae3a4a5ef55b3d972daf6c3aa65f"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.623860 4702 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t7l4h container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.623919 4702 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.638359 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" event={"ID":"7fd114bf-9629-4462-a3cd-8e856e111e2f","Type":"ContainerStarted","Data":"aa79cb56291f3a6bcd0bcbb6f4f05f829dd05bcdda040f207a36d32c673aaed7"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.638440 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" event={"ID":"7fd114bf-9629-4462-a3cd-8e856e111e2f","Type":"ContainerStarted","Data":"9e900e860a549a70866b18d2d199367114b2c6db571275dc52173b8228f79832"} Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.640604 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.640660 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.657170 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.668359 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.668642 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.168614129 +0000 UTC m=+145.409355303 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.668986 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.671596 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.171580082 +0000 UTC m=+145.412321246 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.676015 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwq6" podStartSLOduration=124.675987399 podStartE2EDuration="2m4.675987399s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.672401127 +0000 UTC m=+144.913142291" watchObservedRunningTime="2025-11-24 17:50:55.675987399 +0000 UTC m=+144.916728563" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.723839 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" podStartSLOduration=124.723822987 podStartE2EDuration="2m4.723822987s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.722323699 +0000 UTC m=+144.963064863" watchObservedRunningTime="2025-11-24 17:50:55.723822987 +0000 UTC m=+144.964564151" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.770095 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.770706 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.270689872 +0000 UTC m=+145.511431036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.771499 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.774283 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.274266905 +0000 UTC m=+145.515008069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.842588 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-67bvv" podStartSLOduration=124.842566021 podStartE2EDuration="2m4.842566021s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.839269548 +0000 UTC m=+145.080010722" watchObservedRunningTime="2025-11-24 17:50:55.842566021 +0000 UTC m=+145.083307185" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.845840 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" podStartSLOduration=124.845830323 podStartE2EDuration="2m4.845830323s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.802050963 +0000 UTC m=+145.042792127" watchObservedRunningTime="2025-11-24 17:50:55.845830323 +0000 UTC m=+145.086571487" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.872969 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.873141 4702 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.373108997 +0000 UTC m=+145.613850161 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.875436 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.875899 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.375887694 +0000 UTC m=+145.616628858 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.922329 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-42v6m" podStartSLOduration=124.922309025 podStartE2EDuration="2m4.922309025s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.88664138 +0000 UTC m=+145.127382554" watchObservedRunningTime="2025-11-24 17:50:55.922309025 +0000 UTC m=+145.163050179" Nov 24 17:50:55 crc kubenswrapper[4702]: I1124 17:50:55.978384 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:55 crc kubenswrapper[4702]: E1124 17:50:55.979025 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.47899928 +0000 UTC m=+145.719740444 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.001335 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dvrvn" podStartSLOduration=125.001314308 podStartE2EDuration="2m5.001314308s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.999420898 +0000 UTC m=+145.240162062" watchObservedRunningTime="2025-11-24 17:50:56.001314308 +0000 UTC m=+145.242055472" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.002119 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-krcnx" podStartSLOduration=125.002114383 podStartE2EDuration="2m5.002114383s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:55.97421907 +0000 UTC m=+145.214960254" watchObservedRunningTime="2025-11-24 17:50:56.002114383 +0000 UTC m=+145.242855547" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.036223 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-r5tsm" podStartSLOduration=125.036200309 podStartE2EDuration="2m5.036200309s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.035455166 +0000 UTC m=+145.276196330" watchObservedRunningTime="2025-11-24 17:50:56.036200309 +0000 UTC m=+145.276941463" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.077763 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.080168 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.080519 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.580504745 +0000 UTC m=+145.821245909 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.086572 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:50:56 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:50:56 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:50:56 crc kubenswrapper[4702]: healthz check failed Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.088014 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.115283 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bf65j" podStartSLOduration=125.115262212 podStartE2EDuration="2m5.115262212s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.077532032 +0000 UTC m=+145.318273206" watchObservedRunningTime="2025-11-24 17:50:56.115262212 +0000 UTC m=+145.356003376" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.152221 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-s65mg" podStartSLOduration=125.152200457 podStartE2EDuration="2m5.152200457s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.151448044 +0000 UTC m=+145.392189208" watchObservedRunningTime="2025-11-24 17:50:56.152200457 +0000 UTC m=+145.392941621" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.153923 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rx8zc" podStartSLOduration=125.153915641 podStartE2EDuration="2m5.153915641s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.114643582 +0000 UTC m=+145.355384756" watchObservedRunningTime="2025-11-24 17:50:56.153915641 +0000 UTC m=+145.394656805" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.183627 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.184110 
4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.684088585 +0000 UTC m=+145.924829749 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.210406 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" podStartSLOduration=125.210386957 podStartE2EDuration="2m5.210386957s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.209367696 +0000 UTC m=+145.450108860" watchObservedRunningTime="2025-11-24 17:50:56.210386957 +0000 UTC m=+145.451128121" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.238308 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-9swhp" podStartSLOduration=6.23827834 podStartE2EDuration="6.23827834s" podCreationTimestamp="2025-11-24 17:50:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.234179042 +0000 UTC m=+145.474920206" watchObservedRunningTime="2025-11-24 17:50:56.23827834 +0000 UTC m=+145.479019504" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.284999 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.286569 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.78654074 +0000 UTC m=+146.027281904 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.387465 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.388136 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.888114468 +0000 UTC m=+146.128855632 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.489183 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.489691 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:56.989666925 +0000 UTC m=+146.230408279 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.591230 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.591705 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.091665255 +0000 UTC m=+146.332406419 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.651651 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" event={"ID":"3d5c689a-2e2c-4938-8321-488b25ffab64","Type":"ContainerStarted","Data":"cae1461730c4547f47145e91106c7ce031ec3fecf7100e57d8e7c805a1bef7a3"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.656417 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" event={"ID":"544e3fcf-4e43-4ace-a2ce-f14c43862794","Type":"ContainerStarted","Data":"0845788d3d737d96ee4c31525d04574e5c2701cc885fbd2558b3f9bc1443e235"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.656620 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.658941 4702 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-545qh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.659014 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" podUID="544e3fcf-4e43-4ace-a2ce-f14c43862794" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.669493 4702 generic.go:334] "Generic (PLEG): container finished" podID="e4929a7f-bea6-4417-94f8-3dfdb325719d" 
containerID="eade6d55a17112e5d7a849a124305a88bdce27afb0593f13e5805c39f41428ba" exitCode=0 Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.669566 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" event={"ID":"e4929a7f-bea6-4417-94f8-3dfdb325719d","Type":"ContainerDied","Data":"eade6d55a17112e5d7a849a124305a88bdce27afb0593f13e5805c39f41428ba"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.674179 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" event={"ID":"4b165c9e-1c64-4ecd-985a-c345cdb468f2","Type":"ContainerStarted","Data":"046b07ab2d036f231ca4cb7cffeed7a5b93a4b318f1df7a1be96a596e4fc2946"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.674236 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" event={"ID":"4b165c9e-1c64-4ecd-985a-c345cdb468f2","Type":"ContainerStarted","Data":"1677cf08510feb7459d4b119f3299b8d52d2ff5ed79296b9c8b4633f683d4a31"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.677611 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-skdmk" podStartSLOduration=125.677573153 podStartE2EDuration="2m5.677573153s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.676072205 +0000 UTC m=+145.916813369" watchObservedRunningTime="2025-11-24 17:50:56.677573153 +0000 UTC m=+145.918314327" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.686615 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" event={"ID":"a2146f1c-7229-4a1f-8da8-0fca6ab8c424","Type":"ContainerStarted","Data":"902689a1198764f48858b8434edf37fac289802145cb1afbe70b7780c0e7503d"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.686679 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" event={"ID":"a2146f1c-7229-4a1f-8da8-0fca6ab8c424","Type":"ContainerStarted","Data":"fd6d3fffdb02b13b0e380ed3c69dd53e453f7f68cd51982ce1893c7716684455"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.694438 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.694986 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.194949186 +0000 UTC m=+146.435690350 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.697191 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" event={"ID":"bdf5e1ee-978b-4c9e-964d-da16704faf96","Type":"ContainerStarted","Data":"e7ad297ab7bff9e57c313c4da1ef5456f0399f24f157c283abecdf0de0313607"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.697370 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.708275 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-f268p" podStartSLOduration=125.708247853 podStartE2EDuration="2m5.708247853s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.697478886 +0000 UTC m=+145.938220090" watchObservedRunningTime="2025-11-24 17:50:56.708247853 +0000 UTC m=+145.948989027" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.721635 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" event={"ID":"435d3ff9-5e63-414a-a5ea-1baf2d52c14c","Type":"ContainerStarted","Data":"ddd8ada8e8ae99da03a1381126ee58e5f547a3b88f83112b99e0eb309e7e3661"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.722370 4702 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t7l4h container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.722438 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.729732 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" event={"ID":"d2a41904-b404-4a22-88e0-5d947e877ced","Type":"ContainerStarted","Data":"75fd3fb92ac4eea25ecdeeb74da2ee7fdbf9d678b5458e1d0208fec90ab51a0f"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.745388 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" event={"ID":"89cda856-39cd-46fa-9efe-3df4eb1de216","Type":"ContainerStarted","Data":"8bd269fb14c7534b1a7a9edcf46fcd220ee4e76be8ae2bca0dcccc70fc1de923"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.753401 4702 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-gpf8l" event={"ID":"275fc4d3-9726-42db-b4a0-6ce515b6f66f","Type":"ContainerStarted","Data":"f8bb642f94ea39b2143891048e89696b707f732027fd0184804a1df4e6845263"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.753461 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-gpf8l" event={"ID":"275fc4d3-9726-42db-b4a0-6ce515b6f66f","Type":"ContainerStarted","Data":"f7e0c12fbd8114995f0eb657850bfdea7fed7c609b861b8887df77ecf453eda9"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.758524 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" podStartSLOduration=125.758477754 podStartE2EDuration="2m5.758477754s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.752058373 +0000 UTC m=+145.992799537" watchObservedRunningTime="2025-11-24 17:50:56.758477754 +0000 UTC m=+145.999218928" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.766711 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" event={"ID":"a04e13e7-ff96-4153-b994-e9ccbe20b3ce","Type":"ContainerStarted","Data":"ce6c9941c5086735a29ae9b701412faac9658f3b62d0b6694fd09d7ecd9da9a5"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.767932 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.772318 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-hw6dk" podStartSLOduration=125.772293496 podStartE2EDuration="2m5.772293496s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.772239454 +0000 UTC m=+146.012980618" watchObservedRunningTime="2025-11-24 17:50:56.772293496 +0000 UTC m=+146.013034660" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.777568 4702 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-4jkxd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body= Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.777635 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" podUID="a04e13e7-ff96-4153-b994-e9ccbe20b3ce" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.781037 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" event={"ID":"65952c6c-017f-4092-b799-6ce4cda03518","Type":"ContainerStarted","Data":"85700d59694ee35f6696cdc6d974a852ab6a2af54aea907844dac3ba23173d62"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.781094 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" event={"ID":"65952c6c-017f-4092-b799-6ce4cda03518","Type":"ContainerStarted","Data":"a6ccf343f1bac81617b6ee9450c9f0c841e19629a7ad4fb3b45c7f8b91ff630d"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.798160 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.803376 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.303340077 +0000 UTC m=+146.544081241 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.808319 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" event={"ID":"ccfef0a8-abca-4437-8726-a1ddd184a798","Type":"ContainerStarted","Data":"78de85d99113f4582691864fc93f3bf2d20752c11e4414dafd255e11430e0456"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.808373 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" event={"ID":"ccfef0a8-abca-4437-8726-a1ddd184a798","Type":"ContainerStarted","Data":"4ba05c4fda11a58b4f2bc45004b7f4b909041b46d7c010d32cd666ecd80f7fcb"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.808389 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" event={"ID":"ccfef0a8-abca-4437-8726-a1ddd184a798","Type":"ContainerStarted","Data":"5ebb866e71c3170bb109b277b1dc77e9ea2f4c9a7668a8e39d1d07e673c2c6bc"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.824209 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" event={"ID":"837cbb40-8ba1-4602-8bab-3457eec318cb","Type":"ContainerStarted","Data":"35f9eaff86673cdeb52aad95a2756444d7ef1dfef881e388b65274ec18311829"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.826395 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.829841 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4l7wj" podStartSLOduration=125.829814325 podStartE2EDuration="2m5.829814325s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.801122608 +0000 UTC m=+146.041863772" 
watchObservedRunningTime="2025-11-24 17:50:56.829814325 +0000 UTC m=+146.070555509" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.841082 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" podStartSLOduration=125.841055177 podStartE2EDuration="2m5.841055177s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.829267038 +0000 UTC m=+146.070008222" watchObservedRunningTime="2025-11-24 17:50:56.841055177 +0000 UTC m=+146.081796341" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.871205 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" podStartSLOduration=125.871186349 podStartE2EDuration="2m5.871186349s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.870854719 +0000 UTC m=+146.111595903" watchObservedRunningTime="2025-11-24 17:50:56.871186349 +0000 UTC m=+146.111927503" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.874316 4702 generic.go:334] "Generic (PLEG): container finished" podID="1fca2414-ce67-496f-a33b-b120986eba4c" containerID="663afa531b298a1512d2d89e88dd2df5c619fba0bd3438ee22995b41b347264f" exitCode=0 Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.875218 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" event={"ID":"1fca2414-ce67-496f-a33b-b120986eba4c","Type":"ContainerDied","Data":"663afa531b298a1512d2d89e88dd2df5c619fba0bd3438ee22995b41b347264f"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.898405 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" podStartSLOduration=125.898388771 podStartE2EDuration="2m5.898388771s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.896702798 +0000 UTC m=+146.137443962" watchObservedRunningTime="2025-11-24 17:50:56.898388771 +0000 UTC m=+146.139129935" Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.902075 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" event={"ID":"ea1bd1a9-9346-4125-9af6-4c2d68e4de12","Type":"ContainerStarted","Data":"a3ebaf5fdcfe3020d268b922fbf59996d4055ca0775bb99f23e5b01aa199ea77"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.902114 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" event={"ID":"ea1bd1a9-9346-4125-9af6-4c2d68e4de12","Type":"ContainerStarted","Data":"c111c6489e135fec0303d8f3676962e1b03beabbee5d6659fdd66f2b28ce1b3e"} Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.906582 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" event={"ID":"3aed5125-9645-4a10-899e-175356a63e8e","Type":"ContainerStarted","Data":"813c7290bbb5f5d18bd4ff5b37806c21a52e8239b3c9aa52040cb6560ee8d664"} Nov 24 17:50:56 crc 
kubenswrapper[4702]: I1124 17:50:56.906642 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" event={"ID":"3aed5125-9645-4a10-899e-175356a63e8e","Type":"ContainerStarted","Data":"776f29d1edab7bfad653e5f28f3d3415ff0fee9771c89ff98ac58c11491c9789"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.908600 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" event={"ID":"7d4414e1-85cd-43a7-a232-8f4b285cd09c","Type":"ContainerStarted","Data":"1af30d08b7f199213ebbe4ceb33c7d891785e1c456b85f30da627807d3635e73"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.909397 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2"
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.915028 4702 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mmlk2 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.915095 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" podUID="7d4414e1-85cd-43a7-a232-8f4b285cd09c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.921187 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" event={"ID":"4f870073-6820-465d-9c8c-9d6a39c46b3d","Type":"ContainerStarted","Data":"433aa7ab5bdb644bf5f3e7f775f7dd5fc3330ccd6fc28c816888585a09b4edb5"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.921236 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" event={"ID":"4f870073-6820-465d-9c8c-9d6a39c46b3d","Type":"ContainerStarted","Data":"f4fb013d027702d30d345e9004ea6d23f8c89b6fb1143f78d761dda4e84bef94"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.933056 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" event={"ID":"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c","Type":"ContainerStarted","Data":"8e4f35fd5e11e9ebeaa19e0031be666b1cbbdc49f0c8401fcc6c11d8bd781f24"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.933139 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" event={"ID":"9af41043-704d-4ab0-bd1e-f41bb5cd9a8c","Type":"ContainerStarted","Data":"952a1d0604fd23e70b1c204c0a092192008e55315b101ce4da6c0d80e57210a5"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.949989 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" podStartSLOduration=125.949967084 podStartE2EDuration="2m5.949967084s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.925972484 +0000 UTC m=+146.166713638" watchObservedRunningTime="2025-11-24 17:50:56.949967084 +0000 UTC m=+146.190708258"
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.952599 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-gpf8l" podStartSLOduration=6.9525886759999995 podStartE2EDuration="6.952588676s" podCreationTimestamp="2025-11-24 17:50:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.952380519 +0000 UTC m=+146.193121693" watchObservedRunningTime="2025-11-24 17:50:56.952588676 +0000 UTC m=+146.193329840"
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.960159 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:56 crc kubenswrapper[4702]: E1124 17:50:56.961412 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.461393081 +0000 UTC m=+146.702134245 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.983996 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" event={"ID":"c5416587-18c5-467b-99a9-f3bf9ba3c01e","Type":"ContainerStarted","Data":"0c909bc985df610d55179b11057bf786d222481133c9f450d1f6e79eb9d55a43"}
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.985743 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dlzxp" podStartSLOduration=125.985730443 podStartE2EDuration="2m5.985730443s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:56.980818439 +0000 UTC m=+146.221559613" watchObservedRunningTime="2025-11-24 17:50:56.985730443 +0000 UTC m=+146.226471607"
Nov 24 17:50:56 crc kubenswrapper[4702]: I1124 17:50:56.997073 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" event={"ID":"aa55d4fd-a2ed-4b4f-acb5-03b1704de025","Type":"ContainerStarted","Data":"9a45f7fb7a2d6de8e7670fd5fb7f6e46a84399b6976b568b7b18c63195759be5"}
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.014505 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-th6sc" podStartSLOduration=126.014482752 podStartE2EDuration="2m6.014482752s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.012624515 +0000 UTC m=+146.253365689" watchObservedRunningTime="2025-11-24 17:50:57.014482752 +0000 UTC m=+146.255223916"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.035000 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5bsls" event={"ID":"b28247d0-f3b1-4d4b-bed4-0af181a81dcf","Type":"ContainerStarted","Data":"13487f16e49cad1514568e8e22befc9191a14921d214c794587f4c5d49e1da45"}
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.035079 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5bsls" event={"ID":"b28247d0-f3b1-4d4b-bed4-0af181a81dcf","Type":"ContainerStarted","Data":"d9adb0a1795f04fc596713f75fed7018f69fced97d531766e916c7eec985d8bc"}
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.035103 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-5bsls"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.037747 4702 patch_prober.go:28] interesting pod/console-operator-58897d9998-krcnx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.037822 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-krcnx" podUID="ae2f2e1f-429f-4a8b-9556-26910294ab6e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.038586 4702 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zs4n4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.038710 4702 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-sjf4l container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.038746 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.038792 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.046003 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x62jl" podStartSLOduration=126.045978237 podStartE2EDuration="2m6.045978237s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.045634167 +0000 UTC m=+146.286375331" watchObservedRunningTime="2025-11-24 17:50:57.045978237 +0000 UTC m=+146.286719401"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.069764 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.073145 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.573122737 +0000 UTC m=+146.813863901 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.077117 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.077313 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k4vgs" podStartSLOduration=126.077287677 podStartE2EDuration="2m6.077287677s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.068590635 +0000 UTC m=+146.309331809" watchObservedRunningTime="2025-11-24 17:50:57.077287677 +0000 UTC m=+146.318028841"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.079723 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.579684363 +0000 UTC m=+146.820425747 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.092471 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:50:57 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld
Nov 24 17:50:57 crc kubenswrapper[4702]: [+]process-running ok
Nov 24 17:50:57 crc kubenswrapper[4702]: healthz check failed
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.092522 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.129350 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" podStartSLOduration=126.129326815 podStartE2EDuration="2m6.129326815s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.123882024 +0000 UTC m=+146.364623188" watchObservedRunningTime="2025-11-24 17:50:57.129326815 +0000 UTC m=+146.370067979"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.144918 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dt4m6" podStartSLOduration=126.144897492 podStartE2EDuration="2m6.144897492s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.142872829 +0000 UTC m=+146.383614013" watchObservedRunningTime="2025-11-24 17:50:57.144897492 +0000 UTC m=+146.385638656"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.184693 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.184826 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.68478928 +0000 UTC m=+146.925530444 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.185158 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.185491 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.685483462 +0000 UTC m=+146.926224616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.192555 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" podStartSLOduration=126.192536053 podStartE2EDuration="2m6.192536053s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.170201404 +0000 UTC m=+146.410942568" watchObservedRunningTime="2025-11-24 17:50:57.192536053 +0000 UTC m=+146.433277217"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.193781 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fdlhm" podStartSLOduration=126.193773101 podStartE2EDuration="2m6.193773101s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.190791508 +0000 UTC m=+146.431532672" watchObservedRunningTime="2025-11-24 17:50:57.193773101 +0000 UTC m=+146.434514265"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.211670 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-5bsls" podStartSLOduration=7.211653601 podStartE2EDuration="7.211653601s" podCreationTimestamp="2025-11-24 17:50:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:57.211330871 +0000 UTC m=+146.452072035" watchObservedRunningTime="2025-11-24 17:50:57.211653601 +0000 UTC m=+146.452394765"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.286925 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.287277 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.787238855 +0000 UTC m=+147.027980029 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.287853 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.288300 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.788291538 +0000 UTC m=+147.029032702 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.389146 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.389319 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.889291967 +0000 UTC m=+147.130033141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.389767 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.390275 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.890249247 +0000 UTC m=+147.130990651 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.490552 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.490705 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.990682789 +0000 UTC m=+147.231423963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.490929 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.491543 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:57.991528376 +0000 UTC m=+147.232269550 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.554895 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n"
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.592079 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.592344 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.092308928 +0000 UTC m=+147.333050102 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.592537 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.593125 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.093105944 +0000 UTC m=+147.333847098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.694012 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.694243 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.194207537 +0000 UTC m=+147.434948711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.694298 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.695146 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.195133215 +0000 UTC m=+147.435874579 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.796017 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.796839 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.296815196 +0000 UTC m=+147.537556360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:57 crc kubenswrapper[4702]: I1124 17:50:57.898188 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:57 crc kubenswrapper[4702]: E1124 17:50:57.898740 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.398712414 +0000 UTC m=+147.639453788 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:57.999089 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:57.999402 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.499367743 +0000 UTC m=+147.740108907 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.000167 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.000871 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.500858209 +0000 UTC m=+147.741599383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.040262 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" event={"ID":"e4929a7f-bea6-4417-94f8-3dfdb325719d","Type":"ContainerStarted","Data":"23f841c6b1d2a67e7a9108f19cddc21855f6c710ba995582fea2b3f75694d536"}
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.044249 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" event={"ID":"1fca2414-ce67-496f-a33b-b120986eba4c","Type":"ContainerStarted","Data":"8b5957a4b3fe75e9e919d198534adf9c807c6c02ab4d2d667b1ce85c600af02c"}
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.046015 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" event={"ID":"3aed5125-9645-4a10-899e-175356a63e8e","Type":"ContainerStarted","Data":"6bcb8cae5f928f655542815a2151111a099c38b2e0c65e2e5c78199ebfc8c243"}
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.048904 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5bsls" event={"ID":"b28247d0-f3b1-4d4b-bed4-0af181a81dcf","Type":"ContainerStarted","Data":"d7bbb5f4ae399adf46639f9f2d33d28302d828c005e40af1e07388b175568407"}
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.054593 4702 patch_prober.go:28] interesting pod/console-operator-58897d9998-krcnx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.054662 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-krcnx" podUID="ae2f2e1f-429f-4a8b-9556-26910294ab6e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055005 4702 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mmlk2 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055064 4702 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-545qh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055072 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" podUID="7d4414e1-85cd-43a7-a232-8f4b285cd09c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055092 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" podUID="544e3fcf-4e43-4ace-a2ce-f14c43862794" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055012 4702 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zs4n4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055130 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055423 4702 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-4jkxd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.055458 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd" podUID="a04e13e7-ff96-4153-b994-e9ccbe20b3ce" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.053987 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-78ghc" event={"ID":"c5416587-18c5-467b-99a9-f3bf9ba3c01e","Type":"ContainerStarted","Data":"470cf4d8bbbccd1c843307837f170bf44c6682e133e8d1fb77883046414dfa12"}
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.060551 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.079499 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:50:58 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld
Nov 24 17:50:58 crc kubenswrapper[4702]: [+]process-running ok
Nov 24 17:50:58 crc kubenswrapper[4702]: healthz check failed
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.079585 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.091198 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" podStartSLOduration=127.091172205 podStartE2EDuration="2m7.091172205s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:58.087588432 +0000 UTC m=+147.328329596" watchObservedRunningTime="2025-11-24 17:50:58.091172205 +0000 UTC m=+147.331913369"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.101914 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.102113 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.602083756 +0000 UTC m=+147.842824920 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.108732 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.111609 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.611590374 +0000 UTC m=+147.852331538 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.121268 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-gb2f9" podStartSLOduration=127.121244345 podStartE2EDuration="2m7.121244345s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:58.118062516 +0000 UTC m=+147.358803680" watchObservedRunningTime="2025-11-24 17:50:58.121244345 +0000 UTC m=+147.361985519"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.209954 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.210167 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.710138807 +0000 UTC m=+147.950879971 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.210266 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.210826 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.710809777 +0000 UTC m=+147.951550941 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.311141 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.311256 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.811237779 +0000 UTC m=+148.051978943 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.311764 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.312221 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.812198329 +0000 UTC m=+148.052939513 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.412551 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.413009 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:58.912991252 +0000 UTC m=+148.153732416 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.514152 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.514476 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.014464157 +0000 UTC m=+148.255205321 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.538878 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.540029 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.540100 4702 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-k44cv container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.540139 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" podUID="e4929a7f-bea6-4417-94f8-3dfdb325719d" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.615340 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.615827 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.115811918 +0000 UTC m=+148.356553082 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.721058 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.721456 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.221443243 +0000 UTC m=+148.462184407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.822740 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.822974 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.322945288 +0000 UTC m=+148.563686442 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.823252 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.823631 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.323621919 +0000 UTC m=+148.564363083 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:58 crc kubenswrapper[4702]: I1124 17:50:58.924998 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:58 crc kubenswrapper[4702]: E1124 17:50:58.925582 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.425560357 +0000 UTC m=+148.666301521 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.027006 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.027373 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.527360683 +0000 UTC m=+148.768101847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.060887 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" event={"ID":"be7d235a-3711-426b-a518-8937496a4db1","Type":"ContainerStarted","Data":"bfa2d45750c1fe3217e912a7eef7371da8bde546d84455b8b89d5e5e1b6d3055"}
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.063324 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" event={"ID":"1fca2414-ce67-496f-a33b-b120986eba4c","Type":"ContainerStarted","Data":"ded8db7364dc2dddca5f2401657948660f2bb91193534b149bd8aa18bbd3723a"}
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.063975 4702 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mmlk2 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.064027 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" podUID="7d4414e1-85cd-43a7-a232-8f4b285cd09c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.076994 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:50:59 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld
Nov 24 17:50:59 crc kubenswrapper[4702]: [+]process-running ok
Nov 24 17:50:59 crc kubenswrapper[4702]: healthz check failed
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.077055 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.097389 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" podStartSLOduration=128.097367793 podStartE2EDuration="2m8.097367793s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:50:59.094281746 +0000 UTC m=+148.335022930" watchObservedRunningTime="2025-11-24 17:50:59.097367793 +0000 UTC m=+148.338108957"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.128585 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.128817 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.628766245 +0000 UTC m=+148.869507409 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.130930 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.131847 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.631832231 +0000 UTC m=+148.872573395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.232868 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.233068 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.733036626 +0000 UTC m=+148.973777790 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.233225 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.233522 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.733513461 +0000 UTC m=+148.974254625 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.334341 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.334513 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.83448324 +0000 UTC m=+149.075224404 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.334683 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.335066 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.835051198 +0000 UTC m=+149.075792362 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.340234 4702 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-4nf8m container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.340279 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" podUID="837cbb40-8ba1-4602-8bab-3457eec318cb" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.340287 4702 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-4nf8m container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.340353 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" podUID="837cbb40-8ba1-4602-8bab-3457eec318cb" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.435353 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.435507 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.93548924 +0000 UTC m=+149.176230404 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.435570 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.435877 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:50:59.935868882 +0000 UTC m=+149.176610046 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.537622 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.537861 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.037824521 +0000 UTC m=+149.278565685 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.537944 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.538005 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.538118 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.538194 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.538255 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.538466 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.038453441 +0000 UTC m=+149.279194795 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.539395 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.545648 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.561462 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.561723 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.569127 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.579152 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.638959 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.639175 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.139142761 +0000 UTC m=+149.379883925 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.639282 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.639651 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.139637906 +0000 UTC m=+149.380379070 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.657375 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4jkxd"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.740581 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.740870 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.240790291 +0000 UTC m=+149.481531465 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.741046 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.741384 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.24136982 +0000 UTC m=+149.482110984 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.842552 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.843495 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.343476813 +0000 UTC m=+149.584217977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.869046 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:50:59 crc kubenswrapper[4702]: I1124 17:50:59.948698 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:50:59 crc kubenswrapper[4702]: E1124 17:50:59.949106 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.449092508 +0000 UTC m=+149.689833672 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.051275 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.051892 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.551873762 +0000 UTC m=+149.792614926 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.110415 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:51:00 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld
Nov 24 17:51:00 crc kubenswrapper[4702]: [+]process-running ok
Nov 24 17:51:00 crc kubenswrapper[4702]: healthz check failed
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.110817 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.157172 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.158714 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.658695925 +0000 UTC m=+149.899437089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.258229 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.258489 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.758455945 +0000 UTC m=+149.999197109 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.258540 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.258913 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.758904639 +0000 UTC m=+149.999645803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.368586 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.382596 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.882567489 +0000 UTC m=+150.123308653 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.487615 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.487966 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:00.987952315 +0000 UTC m=+150.228693479 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: W1124 17:51:00.534669 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-8d650155a232ff059e63fe8a3afd5b498ef12b4d94b74af132e87d260048b4ef WatchSource:0}: Error finding container 8d650155a232ff059e63fe8a3afd5b498ef12b4d94b74af132e87d260048b4ef: Status 404 returned error can't find the container with id 8d650155a232ff059e63fe8a3afd5b498ef12b4d94b74af132e87d260048b4ef
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.590861 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.591207 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.091189935 +0000 UTC m=+150.331931099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.692322 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.692828 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.192785353 +0000 UTC m=+150.433526517 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.793851 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.794069 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.29403612 +0000 UTC m=+150.534777284 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.794571 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.794975 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.29496124 +0000 UTC m=+150.535702404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.895279 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.895491 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.395461864 +0000 UTC m=+150.636203038 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:00 crc kubenswrapper[4702]: I1124 17:51:00.895605 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:00 crc kubenswrapper[4702]: E1124 17:51:00.895992 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.39597377 +0000 UTC m=+150.636714934 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.000701 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.000899 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.500869641 +0000 UTC m=+150.741610805 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.001012 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.001333 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.501323835 +0000 UTC m=+150.742064999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.083280 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:51:01 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld
Nov 24 17:51:01 crc kubenswrapper[4702]: [+]process-running ok
Nov 24 17:51:01 crc kubenswrapper[4702]: healthz check failed
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.083351 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.099065 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a2443dc35d44fce7dc90068e43c4778329be8b5e2391f5ed98b1d2e0f3c0a220"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.099125 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"7539b5a509fe63a6be9b2f1a238ebe5c51f3e1b7f6b4e15a3fe7ee950257db7e"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.101682 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.101898 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.601878601 +0000 UTC m=+150.842619765 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.101947 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.102229 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.602221421 +0000 UTC m=+150.842962585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.108103 4702 generic.go:334] "Generic (PLEG): container finished" podID="d2a41904-b404-4a22-88e0-5d947e877ced" containerID="75fd3fb92ac4eea25ecdeeb74da2ee7fdbf9d678b5458e1d0208fec90ab51a0f" exitCode=0
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.108184 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" event={"ID":"d2a41904-b404-4a22-88e0-5d947e877ced","Type":"ContainerDied","Data":"75fd3fb92ac4eea25ecdeeb74da2ee7fdbf9d678b5458e1d0208fec90ab51a0f"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.125421 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" event={"ID":"be7d235a-3711-426b-a518-8937496a4db1","Type":"ContainerStarted","Data":"8edf27ca2113cac8d259aee4f556213b1eaf7a2adaaab35fcd9213dced57884b"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.131743 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"cd487ac0d519aeacda99bff4138e205b039ba2afa8fa696c271f4d374b7db4d1"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.131824 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5b703848916b2c9bee28cbf9fc586cbc69f4ea69abcb50e24976808c3c7acde5"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.132037 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.133847 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"61d81aed3467766300e52b1396d9808a0675de41ee7eec3a320faf164ba378be"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.133875 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8d650155a232ff059e63fe8a3afd5b498ef12b4d94b74af132e87d260048b4ef"}
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.203209 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.203401 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.703372476 +0000 UTC m=+150.944113640 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.203642 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.204413 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.704395238 +0000 UTC m=+150.945136582 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.304568 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.304724 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.804695656 +0000 UTC m=+151.045436820 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.305018 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.305662 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.805639185 +0000 UTC m=+151.046380519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.377051 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cccg8"]
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.378280 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cccg8"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.381507 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.391604 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cccg8"]
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.406248 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.406667 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:01.906648015 +0000 UTC m=+151.147389189 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.486832 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.487563 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.490617 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.490868 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.507960 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.508010 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.508044 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptxd6\" (UniqueName: \"kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8"
Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.508077 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8"
Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.508427 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:02.008413858 +0000 UTC m=+151.249155022 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.520573 4702 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.542022 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.575304 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.576214 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.578696 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.592748 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.608734 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609166 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptxd6\" (UniqueName: \"kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609214 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609238 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609286 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: 
\"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609327 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609736 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.609847 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:51:02.109821091 +0000 UTC m=+151.350562255 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.609980 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.644282 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptxd6\" (UniqueName: \"kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6\") pod \"certified-operators-cccg8\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.696070 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710370 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710424 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710455 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710496 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6slx9\" (UniqueName: \"kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710528 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710537 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.710549 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: E1124 17:51:01.710843 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:51:02.210829671 +0000 UTC m=+151.451570835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5lncr" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.742567 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.776306 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.776401 4702 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-24T17:51:01.52060572Z","Handler":null,"Name":""} Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.777978 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.787159 4702 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.787193 4702 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.794406 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.803049 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.811891 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.812257 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.812372 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.812410 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6slx9\" (UniqueName: \"kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.813427 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.813552 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.821512 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.834275 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6slx9\" (UniqueName: \"kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9\") pod \"community-operators-vt8ck\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.890155 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.912894 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cccg8"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.913903 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkr98\" (UniqueName: \"kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.913965 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.914032 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.914066 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.916789 4702 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.916858 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.975686 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5lncr\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.991165 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.995670 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:01 crc kubenswrapper[4702]: I1124 17:51:01.998508 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.015608 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkr98\" (UniqueName: \"kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.015681 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.015749 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.016389 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.016419 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.039959 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkr98\" (UniqueName: \"kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98\") pod \"certified-operators-9lqsr\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.078682 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:02 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:02 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:02 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.078834 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.092373 4702 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.106309 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.106987 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.126009 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.126091 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.126173 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nskw6\" (UniqueName: \"kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.177784 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.182096 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b","Type":"ContainerStarted","Data":"18e534bdc95bbf9c9bc290202b94d1a0d5b0b304336dd806f08a13645d3f784d"} Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.190861 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" event={"ID":"be7d235a-3711-426b-a518-8937496a4db1","Type":"ContainerStarted","Data":"48aef6a625cfdc959057ced1bf0cd41ce4ff2b1dffc3c30dcb3066b73cdbaf02"} Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.191105 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" event={"ID":"be7d235a-3711-426b-a518-8937496a4db1","Type":"ContainerStarted","Data":"bf3a05d6137a9decaa1db61d650df00185a6db4729cbae045d7cab7ee75ee821"} Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.194311 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerStarted","Data":"6b38eb2bc1c708eb3931fa06a906d2c8693331b04bf004837ce0bd198ec541aa"} Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.227349 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-vwg94" podStartSLOduration=12.227320999 podStartE2EDuration="12.227320999s" podCreationTimestamp="2025-11-24 17:50:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:51:02.213334761 +0000 UTC m=+151.454075945" watchObservedRunningTime="2025-11-24 17:51:02.227320999 +0000 UTC m=+151.468062173" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.229511 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.229559 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.229605 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nskw6\" (UniqueName: \"kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.230868 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.231400 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.270222 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nskw6\" (UniqueName: \"kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6\") pod \"community-operators-z7fl6\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.350683 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4nf8m" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.382940 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:02 crc kubenswrapper[4702]: E1124 17:51:02.471669 4702 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74f9355e_0937_46d3_892a_a2eba2be98d6.slice/crio-conmon-588a1beb8c38b9014a06e593fb5645e592d26feb587298d987d3a92bc6903c9f.scope\": RecentStats: unable to find data in memory cache]" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.499290 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.507046 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:51:02 crc kubenswrapper[4702]: W1124 17:51:02.529133 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddcbd5701_dced_406b_8cf3_0366e6c0f95b.slice/crio-2b28386e8b6288edd9594787c640fc33b06c2d13217f211da415abaea90d92d6 WatchSource:0}: Error finding container 2b28386e8b6288edd9594787c640fc33b06c2d13217f211da415abaea90d92d6: Status 404 returned error can't find the container with id 2b28386e8b6288edd9594787c640fc33b06c2d13217f211da415abaea90d92d6 Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.599079 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.637187 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnhcj\" (UniqueName: \"kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj\") pod \"d2a41904-b404-4a22-88e0-5d947e877ced\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.637617 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume\") pod \"d2a41904-b404-4a22-88e0-5d947e877ced\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.637664 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume\") pod \"d2a41904-b404-4a22-88e0-5d947e877ced\" (UID: \"d2a41904-b404-4a22-88e0-5d947e877ced\") " Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.638272 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume" (OuterVolumeSpecName: "config-volume") pod "d2a41904-b404-4a22-88e0-5d947e877ced" (UID: "d2a41904-b404-4a22-88e0-5d947e877ced"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.642410 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d2a41904-b404-4a22-88e0-5d947e877ced" (UID: "d2a41904-b404-4a22-88e0-5d947e877ced"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.643179 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj" (OuterVolumeSpecName: "kube-api-access-fnhcj") pod "d2a41904-b404-4a22-88e0-5d947e877ced" (UID: "d2a41904-b404-4a22-88e0-5d947e877ced"). InnerVolumeSpecName "kube-api-access-fnhcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.651848 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.742730 4702 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2a41904-b404-4a22-88e0-5d947e877ced-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.742776 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnhcj\" (UniqueName: \"kubernetes.io/projected/d2a41904-b404-4a22-88e0-5d947e877ced-kube-api-access-fnhcj\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:02 crc kubenswrapper[4702]: I1124 17:51:02.742790 4702 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2a41904-b404-4a22-88e0-5d947e877ced-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.076743 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:03 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:03 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:03 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.077024 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.202350 4702 generic.go:334] "Generic (PLEG): container finished" podID="9600d94e-db9c-4688-91d6-6356beb6987a" containerID="e85aa0fffa793305569f7e14085856952bd39c10eaaeb4a65f86bcb6322713af" exitCode=0 Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.202452 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerDied","Data":"e85aa0fffa793305569f7e14085856952bd39c10eaaeb4a65f86bcb6322713af"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.202502 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerStarted","Data":"bfed549d10e4466d1090156d93339ac3034072c47bff77985b2791d49a37bdf9"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.204093 4702 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.204280 4702 generic.go:334] "Generic (PLEG): container finished" 
podID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerID="7fb71b73f1f908237e2896d3c2dd3beea7807c5d54f7324b94826d2b31a37ed8" exitCode=0 Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.204323 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerDied","Data":"7fb71b73f1f908237e2896d3c2dd3beea7807c5d54f7324b94826d2b31a37ed8"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.207360 4702 generic.go:334] "Generic (PLEG): container finished" podID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerID="588a1beb8c38b9014a06e593fb5645e592d26feb587298d987d3a92bc6903c9f" exitCode=0 Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.207612 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerDied","Data":"588a1beb8c38b9014a06e593fb5645e592d26feb587298d987d3a92bc6903c9f"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.207710 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerStarted","Data":"647c1ef798f33a5be7fa9bb59ba0e78934affd8388a3541089d4accd5249edd5"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.208954 4702 generic.go:334] "Generic (PLEG): container finished" podID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerID="84b26b87ac3f2fa2aae8a3208955c9ea3dd402e885c5ba5f4037ac07eb4a9854" exitCode=0 Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.209025 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerDied","Data":"84b26b87ac3f2fa2aae8a3208955c9ea3dd402e885c5ba5f4037ac07eb4a9854"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.209063 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerStarted","Data":"97548912203baf7997b02da562014b20d891e1371743240a53752adc848a74e1"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.219193 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" event={"ID":"dcbd5701-dced-406b-8cf3-0366e6c0f95b","Type":"ContainerStarted","Data":"7c2d4d04c88a3a10006770481abb0dca6ee2eef732006bb79c5ae6a0bb04cb1c"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.219244 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" event={"ID":"dcbd5701-dced-406b-8cf3-0366e6c0f95b","Type":"ContainerStarted","Data":"2b28386e8b6288edd9594787c640fc33b06c2d13217f211da415abaea90d92d6"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.220049 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.222547 4702 generic.go:334] "Generic (PLEG): container finished" podID="d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" containerID="462607e9630b8a2f2ff53492699968d4ca875ac6d3792fc9dc4e545268958689" exitCode=0 Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.222589 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" 
event={"ID":"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b","Type":"ContainerDied","Data":"462607e9630b8a2f2ff53492699968d4ca875ac6d3792fc9dc4e545268958689"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.224700 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" event={"ID":"d2a41904-b404-4a22-88e0-5d947e877ced","Type":"ContainerDied","Data":"0e29db102f52c87bb2267735954de7f5b20e5df375bc34220e8fca4a30a50cce"} Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.224729 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e29db102f52c87bb2267735954de7f5b20e5df375bc34220e8fca4a30a50cce" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.224756 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-wrxpp" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.269404 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" podStartSLOduration=132.269387218 podStartE2EDuration="2m12.269387218s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:51:03.266113236 +0000 UTC m=+152.506854400" watchObservedRunningTime="2025-11-24 17:51:03.269387218 +0000 UTC m=+152.510128382" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.319777 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.319864 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.319861 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.319890 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.353048 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.392011 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.392617 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.394382 
4702 patch_prober.go:28] interesting pod/console-f9d7485db-67bvv container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.394439 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-67bvv" podUID="b1bc1040-fa30-45f9-ab55-54673b3536a2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.470916 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.470977 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.477404 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.499648 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.545008 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.554642 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-k44cv" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.579389 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:51:03 crc kubenswrapper[4702]: E1124 17:51:03.579925 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2a41904-b404-4a22-88e0-5d947e877ced" containerName="collect-profiles" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.580023 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2a41904-b404-4a22-88e0-5d947e877ced" containerName="collect-profiles" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.580226 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2a41904-b404-4a22-88e0-5d947e877ced" containerName="collect-profiles" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.581516 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.583498 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.588913 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.659464 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.664403 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lphsk\" (UniqueName: \"kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.664469 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.664592 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.669492 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-krcnx" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.766078 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.766238 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.766322 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lphsk\" (UniqueName: \"kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.766637 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.766730 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.783818 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lphsk\" (UniqueName: \"kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk\") pod \"redhat-marketplace-5xbxf\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.864299 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-545qh" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.886487 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mmlk2" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.896647 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.980247 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.984889 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:03 crc kubenswrapper[4702]: I1124 17:51:03.989228 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.070993 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.071144 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgw7m\" (UniqueName: \"kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.071218 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.073682 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.076966 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:04 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:04 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:04 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.077019 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.172617 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgw7m\" (UniqueName: \"kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.173320 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.173423 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.174155 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.174642 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.190006 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgw7m\" (UniqueName: \"kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m\") pod \"redhat-marketplace-jtdv7\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.236074 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-k4zhk" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.310272 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.349187 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.677560 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.680364 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.714832 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.716972 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.719715 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.729738 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815437 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir\") pod \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815559 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" (UID: "d8258fa2-5c1c-44a2-ada8-5757cfde3b2b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815592 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access\") pod \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\" (UID: \"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b\") " Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815865 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815927 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.815956 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqpwf\" (UniqueName: \"kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.816088 4702 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.825497 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" (UID: "d8258fa2-5c1c-44a2-ada8-5757cfde3b2b"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.918475 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.918530 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.918555 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqpwf\" (UniqueName: \"kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.918620 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8258fa2-5c1c-44a2-ada8-5757cfde3b2b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.919215 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.919561 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.934542 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqpwf\" (UniqueName: \"kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf\") pod \"redhat-operators-wjgnw\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.980678 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:04 crc kubenswrapper[4702]: E1124 17:51:04.982855 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" containerName="pruner" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.982880 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" containerName="pruner" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.983073 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8258fa2-5c1c-44a2-ada8-5757cfde3b2b" containerName="pruner" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.984224 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:04 crc kubenswrapper[4702]: I1124 17:51:04.991074 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.035030 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.079237 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:05 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:05 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:05 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.079376 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.122677 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.122996 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4488g\" (UniqueName: \"kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.123067 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.226012 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.226103 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.226172 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4488g\" (UniqueName: \"kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g\") pod 
\"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.227090 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.227413 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.247359 4702 generic.go:334] "Generic (PLEG): container finished" podID="12912be2-7861-4e86-9902-789cb8ae519f" containerID="5c7af378d9414269b353f3830182f1f51c25817325ac9073272481d2cfb8cacb" exitCode=0 Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.247447 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerDied","Data":"5c7af378d9414269b353f3830182f1f51c25817325ac9073272481d2cfb8cacb"} Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.247482 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerStarted","Data":"6461ced1a40d2649d7a7a7da3208dc2262f91c8b48d81efd036b3ea8ad465c0c"} Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.250036 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4488g\" (UniqueName: \"kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g\") pod \"redhat-operators-mn5kp\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.250228 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.250223 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d8258fa2-5c1c-44a2-ada8-5757cfde3b2b","Type":"ContainerDied","Data":"18e534bdc95bbf9c9bc290202b94d1a0d5b0b304336dd806f08a13645d3f784d"} Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.250288 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18e534bdc95bbf9c9bc290202b94d1a0d5b0b304336dd806f08a13645d3f784d" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.253336 4702 generic.go:334] "Generic (PLEG): container finished" podID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerID="140156c9d61fcf2a845ade49045dc11c6ab62634afd4625ff9b819ef1e888ef2" exitCode=0 Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.254378 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerDied","Data":"140156c9d61fcf2a845ade49045dc11c6ab62634afd4625ff9b819ef1e888ef2"} Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.254404 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerStarted","Data":"5c78987b416f1fd711893eb59ab3e574d284cb9ddb852c392a968c89b3773c81"} Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.299977 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.359099 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:51:05 crc kubenswrapper[4702]: W1124 17:51:05.384923 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f0763a6_8199_42ee_8388_0b10e1061fa2.slice/crio-06b4f9916444ea9fa2a3bfc97f8e0f66ec0ab18ba4d87423dc1fa3f046799420 WatchSource:0}: Error finding container 06b4f9916444ea9fa2a3bfc97f8e0f66ec0ab18ba4d87423dc1fa3f046799420: Status 404 returned error can't find the container with id 06b4f9916444ea9fa2a3bfc97f8e0f66ec0ab18ba4d87423dc1fa3f046799420 Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.561155 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.563997 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.569451 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.569624 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.583702 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.610883 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.633724 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.633932 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.735332 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.735421 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.735575 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.760164 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:05 crc kubenswrapper[4702]: I1124 17:51:05.895746 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:06 crc kubenswrapper[4702]: I1124 17:51:06.077095 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:06 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:06 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:06 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:06 crc kubenswrapper[4702]: I1124 17:51:06.077210 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:06 crc kubenswrapper[4702]: I1124 17:51:06.244343 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 17:51:06 crc kubenswrapper[4702]: I1124 17:51:06.264151 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerStarted","Data":"086de27ba8e2b96b1e54bd0ba93388d631daeeec383bbac5ab558eab0af3c2e1"} Nov 24 17:51:06 crc kubenswrapper[4702]: W1124 17:51:06.265678 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podc9f347bd_8757_4a61_a961_263aba57701b.slice/crio-048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1 WatchSource:0}: Error finding container 048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1: Status 404 returned error can't find the container with id 048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1 Nov 24 17:51:06 crc kubenswrapper[4702]: I1124 17:51:06.269739 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerStarted","Data":"06b4f9916444ea9fa2a3bfc97f8e0f66ec0ab18ba4d87423dc1fa3f046799420"} Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.077711 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:07 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:07 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:07 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.078231 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.281318 4702 generic.go:334] "Generic (PLEG): container finished" podID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerID="9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b" exitCode=0 Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.281427 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" 
event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerDied","Data":"9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b"} Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.285078 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c9f347bd-8757-4a61-a961-263aba57701b","Type":"ContainerStarted","Data":"048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1"} Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.287396 4702 generic.go:334] "Generic (PLEG): container finished" podID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerID="81b810d75212b2a2622625ce8710cfd75b5f6cf1182f59593a2855442517db09" exitCode=0 Nov 24 17:51:07 crc kubenswrapper[4702]: I1124 17:51:07.287431 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerDied","Data":"81b810d75212b2a2622625ce8710cfd75b5f6cf1182f59593a2855442517db09"} Nov 24 17:51:08 crc kubenswrapper[4702]: I1124 17:51:08.076224 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:08 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:08 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:08 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:08 crc kubenswrapper[4702]: I1124 17:51:08.076297 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:08 crc kubenswrapper[4702]: I1124 17:51:08.300334 4702 generic.go:334] "Generic (PLEG): container finished" podID="c9f347bd-8757-4a61-a961-263aba57701b" containerID="ecc095787236750f63ef9429e8a230148aac28e35c27a37231b58d22986bae56" exitCode=0 Nov 24 17:51:08 crc kubenswrapper[4702]: I1124 17:51:08.300433 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c9f347bd-8757-4a61-a961-263aba57701b","Type":"ContainerDied","Data":"ecc095787236750f63ef9429e8a230148aac28e35c27a37231b58d22986bae56"} Nov 24 17:51:08 crc kubenswrapper[4702]: I1124 17:51:08.936014 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-5bsls" Nov 24 17:51:09 crc kubenswrapper[4702]: I1124 17:51:09.077249 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:09 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:09 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:09 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:09 crc kubenswrapper[4702]: I1124 17:51:09.077342 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:10 crc kubenswrapper[4702]: I1124 17:51:10.075733 4702 patch_prober.go:28] 
interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:10 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:10 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:10 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:10 crc kubenswrapper[4702]: I1124 17:51:10.075991 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:11 crc kubenswrapper[4702]: I1124 17:51:11.077383 4702 patch_prober.go:28] interesting pod/router-default-5444994796-r5tsm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:51:11 crc kubenswrapper[4702]: [-]has-synced failed: reason withheld Nov 24 17:51:11 crc kubenswrapper[4702]: [+]process-running ok Nov 24 17:51:11 crc kubenswrapper[4702]: healthz check failed Nov 24 17:51:11 crc kubenswrapper[4702]: I1124 17:51:11.077880 4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r5tsm" podUID="47b72971-05dc-4099-8e83-04ec202d36a6" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.053180 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.077256 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.084907 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-r5tsm" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.144694 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir\") pod \"c9f347bd-8757-4a61-a961-263aba57701b\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.144933 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access\") pod \"c9f347bd-8757-4a61-a961-263aba57701b\" (UID: \"c9f347bd-8757-4a61-a961-263aba57701b\") " Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.144928 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c9f347bd-8757-4a61-a961-263aba57701b" (UID: "c9f347bd-8757-4a61-a961-263aba57701b"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.145272 4702 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c9f347bd-8757-4a61-a961-263aba57701b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.156642 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c9f347bd-8757-4a61-a961-263aba57701b" (UID: "c9f347bd-8757-4a61-a961-263aba57701b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.248072 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c9f347bd-8757-4a61-a961-263aba57701b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.334811 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c9f347bd-8757-4a61-a961-263aba57701b","Type":"ContainerDied","Data":"048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1"} Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.334835 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:51:12 crc kubenswrapper[4702]: I1124 17:51:12.334862 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="048286d09513241fb595dbc7ad6913625cca4ec70c464e3fb461d0ff286fbae1" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.320542 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.320616 4702 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsc9b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.321151 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.321236 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsc9b" podUID="d26d5c3e-d8c6-4460-9b45-1a2c45971be5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.392739 4702 patch_prober.go:28] interesting pod/console-f9d7485db-67bvv container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.392912 
4702 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-67bvv" podUID="b1bc1040-fa30-45f9-ab55-54673b3536a2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.467503 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.470939 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c28d90e3-ab19-480f-989e-3e49d1289b7a-metrics-certs\") pod \"network-metrics-daemon-wkxgm\" (UID: \"c28d90e3-ab19-480f-989e-3e49d1289b7a\") " pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:51:13 crc kubenswrapper[4702]: I1124 17:51:13.856088 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wkxgm" Nov 24 17:51:22 crc kubenswrapper[4702]: I1124 17:51:22.111465 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:51:22 crc kubenswrapper[4702]: I1124 17:51:22.482684 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:51:22 crc kubenswrapper[4702]: I1124 17:51:22.482743 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:51:23 crc kubenswrapper[4702]: I1124 17:51:23.339464 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-fsc9b" Nov 24 17:51:23 crc kubenswrapper[4702]: I1124 17:51:23.396310 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:51:23 crc kubenswrapper[4702]: I1124 17:51:23.402313 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-67bvv" Nov 24 17:51:24 crc kubenswrapper[4702]: I1124 17:51:24.601097 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wkxgm"] Nov 24 17:51:25 crc kubenswrapper[4702]: E1124 17:51:25.008616 4702 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 24 17:51:25 crc kubenswrapper[4702]: E1124 17:51:25.008909 4702 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ptxd6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-cccg8_openshift-marketplace(fc76efe2-8f4e-49be-b747-865d5aa98156): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:51:25 crc kubenswrapper[4702]: E1124 17:51:25.010095 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-cccg8" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" Nov 24 17:51:26 crc kubenswrapper[4702]: E1124 17:51:26.291826 4702 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 24 17:51:26 crc kubenswrapper[4702]: E1124 17:51:26.292347 4702 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nskw6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-z7fl6_openshift-marketplace(a782d56b-abdd-4128-a560-d1c084d86ac9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:51:26 crc kubenswrapper[4702]: E1124 17:51:26.293770 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-z7fl6" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" Nov 24 17:51:29 crc kubenswrapper[4702]: E1124 17:51:29.321904 4702 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 24 17:51:29 crc kubenswrapper[4702]: E1124 17:51:29.322402 4702 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6slx9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vt8ck_openshift-marketplace(74f9355e-0937-46d3-892a-a2eba2be98d6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:51:29 crc kubenswrapper[4702]: E1124 17:51:29.323635 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vt8ck" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" Nov 24 17:51:32 crc kubenswrapper[4702]: E1124 17:51:32.748174 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-z7fl6" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" Nov 24 17:51:32 crc kubenswrapper[4702]: E1124 17:51:32.748198 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-cccg8" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" Nov 24 17:51:32 crc kubenswrapper[4702]: E1124 17:51:32.748174 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vt8ck" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" Nov 24 17:51:33 crc kubenswrapper[4702]: I1124 17:51:33.523999 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9pcn8" Nov 24 17:51:37 crc kubenswrapper[4702]: W1124 17:51:37.120097 4702 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc28d90e3_ab19_480f_989e_3e49d1289b7a.slice/crio-0dbb98d3b0659312625b07f4f77962c5eb6eb7923342436402307ecc4c68df70 WatchSource:0}: Error finding container 0dbb98d3b0659312625b07f4f77962c5eb6eb7923342436402307ecc4c68df70: Status 404 returned error can't find the container with id 0dbb98d3b0659312625b07f4f77962c5eb6eb7923342436402307ecc4c68df70 Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.149182 4702 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.149370 4702 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jgw7m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-jtdv7_openshift-marketplace(12912be2-7861-4e86-9902-789cb8ae519f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.150584 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-jtdv7" podUID="12912be2-7861-4e86-9902-789cb8ae519f" Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.167019 4702 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.167185 4702 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lphsk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-5xbxf_openshift-marketplace(c65d2141-2672-4f1c-a600-38ccf4af357d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.168910 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-5xbxf" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" Nov 24 17:51:37 crc kubenswrapper[4702]: I1124 17:51:37.478773 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" event={"ID":"c28d90e3-ab19-480f-989e-3e49d1289b7a","Type":"ContainerStarted","Data":"0dbb98d3b0659312625b07f4f77962c5eb6eb7923342436402307ecc4c68df70"} Nov 24 17:51:37 crc kubenswrapper[4702]: I1124 17:51:37.484529 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerStarted","Data":"70cc1a7b380fa6011d51df60863bc0134383fa456941aa8f35e498df68762c01"} Nov 24 17:51:37 crc kubenswrapper[4702]: E1124 17:51:37.487070 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-5xbxf" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.490534 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" event={"ID":"c28d90e3-ab19-480f-989e-3e49d1289b7a","Type":"ContainerStarted","Data":"49707d60938fc6cf0decd74292e6e5343bdb40b068e2a7c2cb769fe13e646b79"} Nov 
24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.491964 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wkxgm" event={"ID":"c28d90e3-ab19-480f-989e-3e49d1289b7a","Type":"ContainerStarted","Data":"83844395dedb0f7320b1f76f94a565ec7735c2845a4112f10b829f50614fdc96"} Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.492551 4702 generic.go:334] "Generic (PLEG): container finished" podID="9600d94e-db9c-4688-91d6-6356beb6987a" containerID="dbb2dcb55e04475ebcea4779a7185faaed7740f68762ac649f9414ba30af8e30" exitCode=0 Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.492685 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerDied","Data":"dbb2dcb55e04475ebcea4779a7185faaed7740f68762ac649f9414ba30af8e30"} Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.494740 4702 generic.go:334] "Generic (PLEG): container finished" podID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerID="4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df" exitCode=0 Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.494845 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerDied","Data":"4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df"} Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.497206 4702 generic.go:334] "Generic (PLEG): container finished" podID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerID="70cc1a7b380fa6011d51df60863bc0134383fa456941aa8f35e498df68762c01" exitCode=0 Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.497237 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerDied","Data":"70cc1a7b380fa6011d51df60863bc0134383fa456941aa8f35e498df68762c01"} Nov 24 17:51:38 crc kubenswrapper[4702]: I1124 17:51:38.507668 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-wkxgm" podStartSLOduration=167.507647599 podStartE2EDuration="2m47.507647599s" podCreationTimestamp="2025-11-24 17:48:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:51:38.504641725 +0000 UTC m=+187.745382899" watchObservedRunningTime="2025-11-24 17:51:38.507647599 +0000 UTC m=+187.748388783" Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.503263 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerStarted","Data":"e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f"} Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.505553 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerStarted","Data":"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188"} Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.508413 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" 
event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerStarted","Data":"144471d1e14b5cbe6d829ab9754de2d2ce5d7ba7660e91303b48dda2401e7ac6"} Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.527271 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9lqsr" podStartSLOduration=2.82464078 podStartE2EDuration="38.527252556s" podCreationTimestamp="2025-11-24 17:51:01 +0000 UTC" firstStartedPulling="2025-11-24 17:51:03.203784787 +0000 UTC m=+152.444525941" lastFinishedPulling="2025-11-24 17:51:38.906396553 +0000 UTC m=+188.147137717" observedRunningTime="2025-11-24 17:51:39.522191198 +0000 UTC m=+188.762932362" watchObservedRunningTime="2025-11-24 17:51:39.527252556 +0000 UTC m=+188.767993720" Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.546636 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wjgnw" podStartSLOduration=3.895039256 podStartE2EDuration="35.546611692s" podCreationTimestamp="2025-11-24 17:51:04 +0000 UTC" firstStartedPulling="2025-11-24 17:51:07.289453641 +0000 UTC m=+156.530194805" lastFinishedPulling="2025-11-24 17:51:38.941026077 +0000 UTC m=+188.181767241" observedRunningTime="2025-11-24 17:51:39.543027049 +0000 UTC m=+188.783768213" watchObservedRunningTime="2025-11-24 17:51:39.546611692 +0000 UTC m=+188.787352866" Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.565922 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mn5kp" podStartSLOduration=8.645416246 podStartE2EDuration="35.565904106s" podCreationTimestamp="2025-11-24 17:51:04 +0000 UTC" firstStartedPulling="2025-11-24 17:51:12.000214061 +0000 UTC m=+161.240955225" lastFinishedPulling="2025-11-24 17:51:38.920701921 +0000 UTC m=+188.161443085" observedRunningTime="2025-11-24 17:51:39.563474319 +0000 UTC m=+188.804215493" watchObservedRunningTime="2025-11-24 17:51:39.565904106 +0000 UTC m=+188.806645270" Nov 24 17:51:39 crc kubenswrapper[4702]: I1124 17:51:39.584674 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:51:42 crc kubenswrapper[4702]: I1124 17:51:42.107416 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:42 crc kubenswrapper[4702]: I1124 17:51:42.107785 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:42 crc kubenswrapper[4702]: I1124 17:51:42.297654 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.036685 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.036972 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.085171 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.301136 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.301189 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.341173 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.579839 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:45 crc kubenswrapper[4702]: I1124 17:51:45.582600 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:51:46 crc kubenswrapper[4702]: I1124 17:51:46.548789 4702 generic.go:334] "Generic (PLEG): container finished" podID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerID="142be2dd5c2277c7706305685af776a0e484199e4c02228f48a2a7b072b8db50" exitCode=0 Nov 24 17:51:46 crc kubenswrapper[4702]: I1124 17:51:46.548869 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerDied","Data":"142be2dd5c2277c7706305685af776a0e484199e4c02228f48a2a7b072b8db50"} Nov 24 17:51:46 crc kubenswrapper[4702]: I1124 17:51:46.723331 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.558245 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerStarted","Data":"a822c38cb3a8a3cecbb88df0afe26ddcd1b8b986ee0d1d4fb8a4fc2fc7d9331c"} Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.560324 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerStarted","Data":"172d65e1f1f4ce47e917c45d5b66805802f6319ff6efdf5277cbbd6e88f80292"} Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.562091 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerStarted","Data":"66608f07fc1fb34977a249961820f817413738c1090844ae0595839490ade88e"} Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.562173 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mn5kp" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="registry-server" containerID="cri-o://1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188" gracePeriod=2 Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.585655 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z7fl6" podStartSLOduration=2.802695874 podStartE2EDuration="46.585630083s" podCreationTimestamp="2025-11-24 17:51:01 +0000 UTC" firstStartedPulling="2025-11-24 17:51:03.210558008 +0000 UTC m=+152.451299172" lastFinishedPulling="2025-11-24 17:51:46.993492217 +0000 UTC m=+196.234233381" observedRunningTime="2025-11-24 17:51:47.576862236 +0000 UTC m=+196.817603400" watchObservedRunningTime="2025-11-24 17:51:47.585630083 +0000 UTC m=+196.826371247" Nov 24 17:51:47 crc 
kubenswrapper[4702]: I1124 17:51:47.873110 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.939902 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities\") pod \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.940341 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4488g\" (UniqueName: \"kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g\") pod \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.940494 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content\") pod \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\" (UID: \"35a4301f-605f-4bbb-bb14-453e7c15eb9b\") " Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.941019 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities" (OuterVolumeSpecName: "utilities") pod "35a4301f-605f-4bbb-bb14-453e7c15eb9b" (UID: "35a4301f-605f-4bbb-bb14-453e7c15eb9b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:47 crc kubenswrapper[4702]: I1124 17:51:47.946136 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g" (OuterVolumeSpecName: "kube-api-access-4488g") pod "35a4301f-605f-4bbb-bb14-453e7c15eb9b" (UID: "35a4301f-605f-4bbb-bb14-453e7c15eb9b"). InnerVolumeSpecName "kube-api-access-4488g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.041911 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.041949 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4488g\" (UniqueName: \"kubernetes.io/projected/35a4301f-605f-4bbb-bb14-453e7c15eb9b-kube-api-access-4488g\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.043050 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.095423 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35a4301f-605f-4bbb-bb14-453e7c15eb9b" (UID: "35a4301f-605f-4bbb-bb14-453e7c15eb9b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.143273 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a4301f-605f-4bbb-bb14-453e7c15eb9b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.568496 4702 generic.go:334] "Generic (PLEG): container finished" podID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerID="66608f07fc1fb34977a249961820f817413738c1090844ae0595839490ade88e" exitCode=0 Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.569652 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerDied","Data":"66608f07fc1fb34977a249961820f817413738c1090844ae0595839490ade88e"} Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.572338 4702 generic.go:334] "Generic (PLEG): container finished" podID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerID="1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188" exitCode=0 Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.572396 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerDied","Data":"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188"} Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.572427 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mn5kp" event={"ID":"35a4301f-605f-4bbb-bb14-453e7c15eb9b","Type":"ContainerDied","Data":"086de27ba8e2b96b1e54bd0ba93388d631daeeec383bbac5ab558eab0af3c2e1"} Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.572446 4702 scope.go:117] "RemoveContainer" containerID="1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.572573 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mn5kp" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.581189 4702 generic.go:334] "Generic (PLEG): container finished" podID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerID="172d65e1f1f4ce47e917c45d5b66805802f6319ff6efdf5277cbbd6e88f80292" exitCode=0 Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.581250 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerDied","Data":"172d65e1f1f4ce47e917c45d5b66805802f6319ff6efdf5277cbbd6e88f80292"} Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.603625 4702 scope.go:117] "RemoveContainer" containerID="4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.612706 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.616361 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mn5kp"] Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.631080 4702 scope.go:117] "RemoveContainer" containerID="9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.648037 4702 scope.go:117] "RemoveContainer" containerID="1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188" Nov 24 17:51:48 crc kubenswrapper[4702]: E1124 17:51:48.649097 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188\": container with ID starting with 1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188 not found: ID does not exist" containerID="1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.649154 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188"} err="failed to get container status \"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188\": rpc error: code = NotFound desc = could not find container \"1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188\": container with ID starting with 1c755cccaf2ee9cb846b38b59b589fe65177100ecc94aee9321fc5dd65148188 not found: ID does not exist" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.649235 4702 scope.go:117] "RemoveContainer" containerID="4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df" Nov 24 17:51:48 crc kubenswrapper[4702]: E1124 17:51:48.649822 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df\": container with ID starting with 4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df not found: ID does not exist" containerID="4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.649882 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df"} err="failed to get container status 
\"4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df\": rpc error: code = NotFound desc = could not find container \"4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df\": container with ID starting with 4d83c8109b1783267c2ec78e75c8ed81420c98df8553fb59a4a72cca315c42df not found: ID does not exist" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.649923 4702 scope.go:117] "RemoveContainer" containerID="9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b" Nov 24 17:51:48 crc kubenswrapper[4702]: E1124 17:51:48.650374 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b\": container with ID starting with 9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b not found: ID does not exist" containerID="9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b" Nov 24 17:51:48 crc kubenswrapper[4702]: I1124 17:51:48.650434 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b"} err="failed to get container status \"9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b\": rpc error: code = NotFound desc = could not find container \"9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b\": container with ID starting with 9ee8fa515fd4429af7c3170a4b6642cfc84a1713227ddfaea4c1903f0069238b not found: ID does not exist" Nov 24 17:51:49 crc kubenswrapper[4702]: I1124 17:51:49.589311 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerStarted","Data":"69fe9ee27049121e36be5a7ae25b092d8f64d1ab84891496ae3d6bb5fdbda19b"} Nov 24 17:51:49 crc kubenswrapper[4702]: I1124 17:51:49.592606 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerStarted","Data":"d45a41aed6f8ef2791f5587526bdb205e5670b9da8c884c36cfd03959788c320"} Nov 24 17:51:49 crc kubenswrapper[4702]: I1124 17:51:49.605851 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cccg8" podStartSLOduration=2.699482633 podStartE2EDuration="48.605782391s" podCreationTimestamp="2025-11-24 17:51:01 +0000 UTC" firstStartedPulling="2025-11-24 17:51:03.205939183 +0000 UTC m=+152.446680347" lastFinishedPulling="2025-11-24 17:51:49.112238941 +0000 UTC m=+198.352980105" observedRunningTime="2025-11-24 17:51:49.604498933 +0000 UTC m=+198.845240117" watchObservedRunningTime="2025-11-24 17:51:49.605782391 +0000 UTC m=+198.846523555" Nov 24 17:51:49 crc kubenswrapper[4702]: I1124 17:51:49.628825 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vt8ck" podStartSLOduration=2.8121940370000003 podStartE2EDuration="48.62878969s" podCreationTimestamp="2025-11-24 17:51:01 +0000 UTC" firstStartedPulling="2025-11-24 17:51:03.208738651 +0000 UTC m=+152.449479815" lastFinishedPulling="2025-11-24 17:51:49.025334304 +0000 UTC m=+198.266075468" observedRunningTime="2025-11-24 17:51:49.625231209 +0000 UTC m=+198.865972373" watchObservedRunningTime="2025-11-24 17:51:49.62878969 +0000 UTC m=+198.869530854" Nov 24 17:51:49 crc kubenswrapper[4702]: I1124 17:51:49.655711 4702 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" path="/var/lib/kubelet/pods/35a4301f-605f-4bbb-bb14-453e7c15eb9b/volumes" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.700201 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.702838 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.750871 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.891210 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.891263 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:51 crc kubenswrapper[4702]: I1124 17:51:51.928942 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.146403 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.383863 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.383931 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.427234 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.482500 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.482556 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.482601 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.483065 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.483123 4702 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f" gracePeriod=600 Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.643450 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.918967 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:52 crc kubenswrapper[4702]: I1124 17:51:52.920368 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9lqsr" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="registry-server" containerID="cri-o://e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f" gracePeriod=2 Nov 24 17:51:53 crc kubenswrapper[4702]: E1124 17:51:53.044670 4702 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9600d94e_db9c_4688_91d6_6356beb6987a.slice/crio-e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f.scope\": RecentStats: unable to find data in memory cache]" Nov 24 17:51:53 crc kubenswrapper[4702]: I1124 17:51:53.614379 4702 generic.go:334] "Generic (PLEG): container finished" podID="9600d94e-db9c-4688-91d6-6356beb6987a" containerID="e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f" exitCode=0 Nov 24 17:51:53 crc kubenswrapper[4702]: I1124 17:51:53.614475 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerDied","Data":"e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f"} Nov 24 17:51:53 crc kubenswrapper[4702]: I1124 17:51:53.616310 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f" exitCode=0 Nov 24 17:51:53 crc kubenswrapper[4702]: I1124 17:51:53.616415 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.320153 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.320836 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z7fl6" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="registry-server" containerID="cri-o://a822c38cb3a8a3cecbb88df0afe26ddcd1b8b986ee0d1d4fb8a4fc2fc7d9331c" gracePeriod=2 Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.463390 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.541478 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkr98\" (UniqueName: \"kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98\") pod \"9600d94e-db9c-4688-91d6-6356beb6987a\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.541586 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities\") pod \"9600d94e-db9c-4688-91d6-6356beb6987a\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.541661 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content\") pod \"9600d94e-db9c-4688-91d6-6356beb6987a\" (UID: \"9600d94e-db9c-4688-91d6-6356beb6987a\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.542644 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities" (OuterVolumeSpecName: "utilities") pod "9600d94e-db9c-4688-91d6-6356beb6987a" (UID: "9600d94e-db9c-4688-91d6-6356beb6987a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.546823 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98" (OuterVolumeSpecName: "kube-api-access-bkr98") pod "9600d94e-db9c-4688-91d6-6356beb6987a" (UID: "9600d94e-db9c-4688-91d6-6356beb6987a"). InnerVolumeSpecName "kube-api-access-bkr98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.592464 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9600d94e-db9c-4688-91d6-6356beb6987a" (UID: "9600d94e-db9c-4688-91d6-6356beb6987a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.629091 4702 generic.go:334] "Generic (PLEG): container finished" podID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerID="a822c38cb3a8a3cecbb88df0afe26ddcd1b8b986ee0d1d4fb8a4fc2fc7d9331c" exitCode=0 Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.629162 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerDied","Data":"a822c38cb3a8a3cecbb88df0afe26ddcd1b8b986ee0d1d4fb8a4fc2fc7d9331c"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.633649 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lqsr" event={"ID":"9600d94e-db9c-4688-91d6-6356beb6987a","Type":"ContainerDied","Data":"bfed549d10e4466d1090156d93339ac3034072c47bff77985b2791d49a37bdf9"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.633723 4702 scope.go:117] "RemoveContainer" containerID="e5c15b7bf8c35d10220e56584c677cc055f8d1ad1e1c296b6f8aec1c3845fa9f" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.633669 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lqsr" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.641639 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.643117 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkr98\" (UniqueName: \"kubernetes.io/projected/9600d94e-db9c-4688-91d6-6356beb6987a-kube-api-access-bkr98\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.643185 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.644749 4702 generic.go:334] "Generic (PLEG): container finished" podID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerID="cdd8aa3833a2c3296567333c0f0423569277fb87f520973748f48d1ebc00af1a" exitCode=0 Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.644895 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerDied","Data":"cdd8aa3833a2c3296567333c0f0423569277fb87f520973748f48d1ebc00af1a"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.646388 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9600d94e-db9c-4688-91d6-6356beb6987a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.658624 4702 scope.go:117] "RemoveContainer" containerID="dbb2dcb55e04475ebcea4779a7185faaed7740f68762ac649f9414ba30af8e30" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.663147 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.664670 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerStarted","Data":"4e7dbd847df09edc60027ed709fe9b8ddc6ba6f318bc535b931d16e3d17f885e"} Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.691108 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.693010 4702 scope.go:117] "RemoveContainer" containerID="e85aa0fffa793305569f7e14085856952bd39c10eaaeb4a65f86bcb6322713af" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.694672 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9lqsr"] Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.747750 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content\") pod \"a782d56b-abdd-4128-a560-d1c084d86ac9\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.747876 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities\") pod \"a782d56b-abdd-4128-a560-d1c084d86ac9\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.747924 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nskw6\" (UniqueName: \"kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6\") pod \"a782d56b-abdd-4128-a560-d1c084d86ac9\" (UID: \"a782d56b-abdd-4128-a560-d1c084d86ac9\") " Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.749942 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities" (OuterVolumeSpecName: "utilities") pod "a782d56b-abdd-4128-a560-d1c084d86ac9" (UID: "a782d56b-abdd-4128-a560-d1c084d86ac9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.752351 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6" (OuterVolumeSpecName: "kube-api-access-nskw6") pod "a782d56b-abdd-4128-a560-d1c084d86ac9" (UID: "a782d56b-abdd-4128-a560-d1c084d86ac9"). InnerVolumeSpecName "kube-api-access-nskw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.810202 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a782d56b-abdd-4128-a560-d1c084d86ac9" (UID: "a782d56b-abdd-4128-a560-d1c084d86ac9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.849447 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.849492 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782d56b-abdd-4128-a560-d1c084d86ac9-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:55 crc kubenswrapper[4702]: I1124 17:51:55.849502 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nskw6\" (UniqueName: \"kubernetes.io/projected/a782d56b-abdd-4128-a560-d1c084d86ac9-kube-api-access-nskw6\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.658975 4702 generic.go:334] "Generic (PLEG): container finished" podID="12912be2-7861-4e86-9902-789cb8ae519f" containerID="4e7dbd847df09edc60027ed709fe9b8ddc6ba6f318bc535b931d16e3d17f885e" exitCode=0 Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.659070 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerDied","Data":"4e7dbd847df09edc60027ed709fe9b8ddc6ba6f318bc535b931d16e3d17f885e"} Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.665840 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7fl6" event={"ID":"a782d56b-abdd-4128-a560-d1c084d86ac9","Type":"ContainerDied","Data":"97548912203baf7997b02da562014b20d891e1371743240a53752adc848a74e1"} Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.665890 4702 scope.go:117] "RemoveContainer" containerID="a822c38cb3a8a3cecbb88df0afe26ddcd1b8b986ee0d1d4fb8a4fc2fc7d9331c" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.666058 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7fl6" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.670872 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerStarted","Data":"06c84f714475a4e00be5021d3ce2d7f91f5ae759b374f698e516f726b5de846c"} Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.688524 4702 scope.go:117] "RemoveContainer" containerID="142be2dd5c2277c7706305685af776a0e484199e4c02228f48a2a7b072b8db50" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.711570 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5xbxf" podStartSLOduration=2.883289837 podStartE2EDuration="53.711554339s" podCreationTimestamp="2025-11-24 17:51:03 +0000 UTC" firstStartedPulling="2025-11-24 17:51:05.255919274 +0000 UTC m=+154.496660438" lastFinishedPulling="2025-11-24 17:51:56.084183776 +0000 UTC m=+205.324924940" observedRunningTime="2025-11-24 17:51:56.711076999 +0000 UTC m=+205.951818163" watchObservedRunningTime="2025-11-24 17:51:56.711554339 +0000 UTC m=+205.952295503" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.730978 4702 scope.go:117] "RemoveContainer" containerID="84b26b87ac3f2fa2aae8a3208955c9ea3dd402e885c5ba5f4037ac07eb4a9854" Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.733449 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:56 crc kubenswrapper[4702]: I1124 17:51:56.737960 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z7fl6"] Nov 24 17:51:57 crc kubenswrapper[4702]: I1124 17:51:57.655852 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" path="/var/lib/kubelet/pods/9600d94e-db9c-4688-91d6-6356beb6987a/volumes" Nov 24 17:51:57 crc kubenswrapper[4702]: I1124 17:51:57.656871 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" path="/var/lib/kubelet/pods/a782d56b-abdd-4128-a560-d1c084d86ac9/volumes" Nov 24 17:51:57 crc kubenswrapper[4702]: I1124 17:51:57.677374 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerStarted","Data":"50712f5f1b16b22907e192eb29683769702e25c3fd9393f9828362ec68619ed1"} Nov 24 17:51:57 crc kubenswrapper[4702]: I1124 17:51:57.696097 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jtdv7" podStartSLOduration=3.892721053 podStartE2EDuration="54.696079443s" podCreationTimestamp="2025-11-24 17:51:03 +0000 UTC" firstStartedPulling="2025-11-24 17:51:06.274058956 +0000 UTC m=+155.514800120" lastFinishedPulling="2025-11-24 17:51:57.077417346 +0000 UTC m=+206.318158510" observedRunningTime="2025-11-24 17:51:57.695787947 +0000 UTC m=+206.936529111" watchObservedRunningTime="2025-11-24 17:51:57.696079443 +0000 UTC m=+206.936820607" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.062058 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.062260 4702 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" containerID="cri-o://5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20" gracePeriod=30 Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.167690 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.168109 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" containerID="cri-o://ddd8ada8e8ae99da03a1381126ee58e5f547a3b88f83112b99e0eb309e7e3661" gracePeriod=30 Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.481181 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.584054 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert\") pod \"47976e32-f007-462e-9df5-e2c674f8b73a\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.584106 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config\") pod \"47976e32-f007-462e-9df5-e2c674f8b73a\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.584126 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgvjb\" (UniqueName: \"kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb\") pod \"47976e32-f007-462e-9df5-e2c674f8b73a\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.584214 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca\") pod \"47976e32-f007-462e-9df5-e2c674f8b73a\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.584264 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles\") pod \"47976e32-f007-462e-9df5-e2c674f8b73a\" (UID: \"47976e32-f007-462e-9df5-e2c674f8b73a\") " Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.585059 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca" (OuterVolumeSpecName: "client-ca") pod "47976e32-f007-462e-9df5-e2c674f8b73a" (UID: "47976e32-f007-462e-9df5-e2c674f8b73a"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.585177 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config" (OuterVolumeSpecName: "config") pod "47976e32-f007-462e-9df5-e2c674f8b73a" (UID: "47976e32-f007-462e-9df5-e2c674f8b73a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.585516 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "47976e32-f007-462e-9df5-e2c674f8b73a" (UID: "47976e32-f007-462e-9df5-e2c674f8b73a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.590234 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb" (OuterVolumeSpecName: "kube-api-access-vgvjb") pod "47976e32-f007-462e-9df5-e2c674f8b73a" (UID: "47976e32-f007-462e-9df5-e2c674f8b73a"). InnerVolumeSpecName "kube-api-access-vgvjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.590288 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "47976e32-f007-462e-9df5-e2c674f8b73a" (UID: "47976e32-f007-462e-9df5-e2c674f8b73a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.685058 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.685087 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.685098 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47976e32-f007-462e-9df5-e2c674f8b73a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.685147 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47976e32-f007-462e-9df5-e2c674f8b73a-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.685157 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgvjb\" (UniqueName: \"kubernetes.io/projected/47976e32-f007-462e-9df5-e2c674f8b73a-kube-api-access-vgvjb\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.686741 4702 generic.go:334] "Generic (PLEG): container finished" podID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerID="ddd8ada8e8ae99da03a1381126ee58e5f547a3b88f83112b99e0eb309e7e3661" exitCode=0 Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.686810 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" event={"ID":"435d3ff9-5e63-414a-a5ea-1baf2d52c14c","Type":"ContainerDied","Data":"ddd8ada8e8ae99da03a1381126ee58e5f547a3b88f83112b99e0eb309e7e3661"} Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.688015 4702 generic.go:334] "Generic (PLEG): container finished" podID="47976e32-f007-462e-9df5-e2c674f8b73a" containerID="5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20" exitCode=0 Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.688037 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" event={"ID":"47976e32-f007-462e-9df5-e2c674f8b73a","Type":"ContainerDied","Data":"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20"} Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.688056 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" event={"ID":"47976e32-f007-462e-9df5-e2c674f8b73a","Type":"ContainerDied","Data":"c9e761b2c056a832556741aa7f554befd55ffdf53efb911f578c38f9e3aedc0e"} Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.688074 4702 scope.go:117] "RemoveContainer" containerID="5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.688081 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sjf4l" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.703062 4702 scope.go:117] "RemoveContainer" containerID="5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20" Nov 24 17:51:58 crc kubenswrapper[4702]: E1124 17:51:58.703453 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20\": container with ID starting with 5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20 not found: ID does not exist" containerID="5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.703483 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20"} err="failed to get container status \"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20\": rpc error: code = NotFound desc = could not find container \"5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20\": container with ID starting with 5f0702ca4afc08538b26eb2470c438d0faf0fb035059d1bb0915393ab654cd20 not found: ID does not exist" Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.714471 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:51:58 crc kubenswrapper[4702]: I1124 17:51:58.717178 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sjf4l"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.090143 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.190329 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca\") pod \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.190429 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config\") pod \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.190506 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lshph\" (UniqueName: \"kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph\") pod \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.190569 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert\") pod \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\" (UID: \"435d3ff9-5e63-414a-a5ea-1baf2d52c14c\") " Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.191244 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca" (OuterVolumeSpecName: "client-ca") pod "435d3ff9-5e63-414a-a5ea-1baf2d52c14c" (UID: "435d3ff9-5e63-414a-a5ea-1baf2d52c14c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.192029 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config" (OuterVolumeSpecName: "config") pod "435d3ff9-5e63-414a-a5ea-1baf2d52c14c" (UID: "435d3ff9-5e63-414a-a5ea-1baf2d52c14c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.195407 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph" (OuterVolumeSpecName: "kube-api-access-lshph") pod "435d3ff9-5e63-414a-a5ea-1baf2d52c14c" (UID: "435d3ff9-5e63-414a-a5ea-1baf2d52c14c"). InnerVolumeSpecName "kube-api-access-lshph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.195495 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "435d3ff9-5e63-414a-a5ea-1baf2d52c14c" (UID: "435d3ff9-5e63-414a-a5ea-1baf2d52c14c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.291998 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.292040 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.292050 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lshph\" (UniqueName: \"kubernetes.io/projected/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-kube-api-access-lshph\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.292061 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/435d3ff9-5e63-414a-a5ea-1baf2d52c14c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.508622 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509053 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509072 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509086 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509094 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509107 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509115 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509787 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509826 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509843 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509853 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509865 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="registry-server" Nov 24 17:51:59 crc 
kubenswrapper[4702]: I1124 17:51:59.509873 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509884 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509892 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509903 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509911 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="extract-content" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509925 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509932 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509944 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509953 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="extract-utilities" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509968 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9f347bd-8757-4a61-a961-263aba57701b" containerName="pruner" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509976 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9f347bd-8757-4a61-a961-263aba57701b" containerName="pruner" Nov 24 17:51:59 crc kubenswrapper[4702]: E1124 17:51:59.509986 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.509993 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510871 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" containerName="controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510904 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="a782d56b-abdd-4128-a560-d1c084d86ac9" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510914 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="35a4301f-605f-4bbb-bb14-453e7c15eb9b" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510926 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="9600d94e-db9c-4688-91d6-6356beb6987a" containerName="registry-server" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510935 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9f347bd-8757-4a61-a961-263aba57701b" 
containerName="pruner" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.510946 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" containerName="route-controller-manager" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.511312 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.511413 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.511798 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.513911 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.514569 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.514600 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.514803 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.514833 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.515182 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.541038 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.542982 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.550936 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.595859 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.595924 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.595949 4702 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.595966 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.595989 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdrlq\" (UniqueName: \"kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.596008 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlhzg\" (UniqueName: \"kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.596026 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.596049 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.596067 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.656429 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47976e32-f007-462e-9df5-e2c674f8b73a" path="/var/lib/kubelet/pods/47976e32-f007-462e-9df5-e2c674f8b73a/volumes" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.695408 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" 
event={"ID":"435d3ff9-5e63-414a-a5ea-1baf2d52c14c","Type":"ContainerDied","Data":"ccec92838ece5841c2f51376584a918ffafa3f672295b02a0957fef3fff5fdec"} Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.695464 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.695471 4702 scope.go:117] "RemoveContainer" containerID="ddd8ada8e8ae99da03a1381126ee58e5f547a3b88f83112b99e0eb309e7e3661" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.696800 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697243 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697278 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697312 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdrlq\" (UniqueName: \"kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697358 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlhzg\" (UniqueName: \"kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697409 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697461 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc 
kubenswrapper[4702]: I1124 17:51:59.697519 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.697575 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.699333 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.699470 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.699892 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.700024 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.700621 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.701725 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.718099 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 
17:51:59.718897 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdrlq\" (UniqueName: \"kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq\") pod \"controller-manager-7f7bf5bf79-chwfq\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.719082 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.720262 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t7l4h"] Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.722559 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlhzg\" (UniqueName: \"kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg\") pod \"route-controller-manager-77cddf497b-m6tgh\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.846954 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:51:59 crc kubenswrapper[4702]: I1124 17:51:59.860451 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:52:00 crc kubenswrapper[4702]: I1124 17:52:00.083897 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:52:00 crc kubenswrapper[4702]: I1124 17:52:00.337262 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:52:00 crc kubenswrapper[4702]: I1124 17:52:00.702383 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" event={"ID":"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e","Type":"ContainerStarted","Data":"22d8076e1839579ebc7e6e4544929abcb5c53bb44cdfddc169d66bc916919e8b"} Nov 24 17:52:00 crc kubenswrapper[4702]: I1124 17:52:00.704528 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" event={"ID":"6c41887a-53f2-42e0-9136-2d2f2d0716ea","Type":"ContainerStarted","Data":"d5cd2c2261e6f8d8d9e7ee276237bebf4b2ec187f4ca39a62dc5124be5b423ec"} Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.654752 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="435d3ff9-5e63-414a-a5ea-1baf2d52c14c" path="/var/lib/kubelet/pods/435d3ff9-5e63-414a-a5ea-1baf2d52c14c/volumes" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.712128 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" event={"ID":"6c41887a-53f2-42e0-9136-2d2f2d0716ea","Type":"ContainerStarted","Data":"068eb80856875cdea08073a228c0083e61c577ab1cc2d41f48629bea98f696b5"} Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.712795 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.719892 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" event={"ID":"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e","Type":"ContainerStarted","Data":"996844dc42854f6c9c8e5686be24c1919eb90db2a918c7547c30d4f2e59dc131"} Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.721060 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.723333 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.734299 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.738234 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" podStartSLOduration=3.738216331 podStartE2EDuration="3.738216331s" podCreationTimestamp="2025-11-24 17:51:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:01.734691852 +0000 UTC m=+210.975433026" watchObservedRunningTime="2025-11-24 17:52:01.738216331 +0000 UTC 
m=+210.978957505" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.747048 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.762361 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" podStartSLOduration=3.762342945 podStartE2EDuration="3.762342945s" podCreationTimestamp="2025-11-24 17:51:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:01.759072241 +0000 UTC m=+210.999813425" watchObservedRunningTime="2025-11-24 17:52:01.762342945 +0000 UTC m=+211.003084119" Nov 24 17:52:01 crc kubenswrapper[4702]: I1124 17:52:01.937105 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:52:03 crc kubenswrapper[4702]: I1124 17:52:03.897871 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:03 crc kubenswrapper[4702]: I1124 17:52:03.898766 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:03 crc kubenswrapper[4702]: I1124 17:52:03.938134 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:04 crc kubenswrapper[4702]: I1124 17:52:04.311517 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:04 crc kubenswrapper[4702]: I1124 17:52:04.311572 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:04 crc kubenswrapper[4702]: I1124 17:52:04.369511 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:04 crc kubenswrapper[4702]: I1124 17:52:04.767533 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:04 crc kubenswrapper[4702]: I1124 17:52:04.767723 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:06 crc kubenswrapper[4702]: I1124 17:52:06.719854 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:52:06 crc kubenswrapper[4702]: I1124 17:52:06.741898 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jtdv7" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="registry-server" containerID="cri-o://50712f5f1b16b22907e192eb29683769702e25c3fd9393f9828362ec68619ed1" gracePeriod=2 Nov 24 17:52:07 crc kubenswrapper[4702]: I1124 17:52:07.762904 4702 generic.go:334] "Generic (PLEG): container finished" podID="12912be2-7861-4e86-9902-789cb8ae519f" containerID="50712f5f1b16b22907e192eb29683769702e25c3fd9393f9828362ec68619ed1" exitCode=0 Nov 24 17:52:07 crc kubenswrapper[4702]: I1124 17:52:07.762990 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" 
event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerDied","Data":"50712f5f1b16b22907e192eb29683769702e25c3fd9393f9828362ec68619ed1"} Nov 24 17:52:07 crc kubenswrapper[4702]: I1124 17:52:07.956532 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.007874 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities\") pod \"12912be2-7861-4e86-9902-789cb8ae519f\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.007946 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content\") pod \"12912be2-7861-4e86-9902-789cb8ae519f\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.007991 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgw7m\" (UniqueName: \"kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m\") pod \"12912be2-7861-4e86-9902-789cb8ae519f\" (UID: \"12912be2-7861-4e86-9902-789cb8ae519f\") " Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.009002 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities" (OuterVolumeSpecName: "utilities") pod "12912be2-7861-4e86-9902-789cb8ae519f" (UID: "12912be2-7861-4e86-9902-789cb8ae519f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.013143 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m" (OuterVolumeSpecName: "kube-api-access-jgw7m") pod "12912be2-7861-4e86-9902-789cb8ae519f" (UID: "12912be2-7861-4e86-9902-789cb8ae519f"). InnerVolumeSpecName "kube-api-access-jgw7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.026661 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12912be2-7861-4e86-9902-789cb8ae519f" (UID: "12912be2-7861-4e86-9902-789cb8ae519f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.109401 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.109446 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12912be2-7861-4e86-9902-789cb8ae519f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.109459 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgw7m\" (UniqueName: \"kubernetes.io/projected/12912be2-7861-4e86-9902-789cb8ae519f-kube-api-access-jgw7m\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.769849 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtdv7" event={"ID":"12912be2-7861-4e86-9902-789cb8ae519f","Type":"ContainerDied","Data":"6461ced1a40d2649d7a7a7da3208dc2262f91c8b48d81efd036b3ea8ad465c0c"} Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.769912 4702 scope.go:117] "RemoveContainer" containerID="50712f5f1b16b22907e192eb29683769702e25c3fd9393f9828362ec68619ed1" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.769928 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtdv7" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.792369 4702 scope.go:117] "RemoveContainer" containerID="4e7dbd847df09edc60027ed709fe9b8ddc6ba6f318bc535b931d16e3d17f885e" Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.797235 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.800606 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtdv7"] Nov 24 17:52:08 crc kubenswrapper[4702]: I1124 17:52:08.821474 4702 scope.go:117] "RemoveContainer" containerID="5c7af378d9414269b353f3830182f1f51c25817325ac9073272481d2cfb8cacb" Nov 24 17:52:09 crc kubenswrapper[4702]: I1124 17:52:09.655924 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12912be2-7861-4e86-9902-789cb8ae519f" path="/var/lib/kubelet/pods/12912be2-7861-4e86-9902-789cb8ae519f/volumes" Nov 24 17:52:13 crc kubenswrapper[4702]: I1124 17:52:13.068818 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerName="oauth-openshift" containerID="cri-o://2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001" gracePeriod=15 Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.642655 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703420 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703498 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703526 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703570 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703603 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703632 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703664 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703689 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703711 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 
17:52:14.703741 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703765 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703786 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703868 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.703903 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bcgg\" (UniqueName: \"kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg\") pod \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\" (UID: \"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a\") " Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.704966 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.705172 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.705213 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.705254 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.705436 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.709379 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.709795 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg" (OuterVolumeSpecName: "kube-api-access-7bcgg") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "kube-api-access-7bcgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.710317 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.710815 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.711487 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.711785 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.712386 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.712682 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.712783 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" (UID: "0b1e5e1b-4ea3-4481-9bdc-f24a7689775a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.799588 4702 generic.go:334] "Generic (PLEG): container finished" podID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerID="2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001" exitCode=0 Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.799629 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" event={"ID":"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a","Type":"ContainerDied","Data":"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001"} Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.799654 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" event={"ID":"0b1e5e1b-4ea3-4481-9bdc-f24a7689775a","Type":"ContainerDied","Data":"1c5eb18a9e1fa6a44aa65baee5f8fac9077e55b015498bf01189a6d4b980f372"} Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.799703 4702 scope.go:117] "RemoveContainer" containerID="2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.800062 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n2bxq" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804848 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804879 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804890 4702 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804903 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804915 4702 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804938 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804951 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804968 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804980 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804989 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.804998 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.805009 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.805021 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bcgg\" (UniqueName: \"kubernetes.io/projected/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-kube-api-access-7bcgg\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.805040 4702 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.820299 4702 scope.go:117] "RemoveContainer" containerID="2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001" Nov 24 17:52:14 crc kubenswrapper[4702]: E1124 17:52:14.825032 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001\": container with ID starting with 2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001 not found: ID does not exist" containerID="2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.825084 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001"} err="failed to get container status \"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001\": rpc error: code = NotFound desc = could not find container \"2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001\": container with ID starting with 2eeffa871e401069923af225350177927c9b278c3d1a112a45a1aa20a9e2b001 not found: ID does not exist" Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.830178 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:52:14 crc kubenswrapper[4702]: I1124 17:52:14.832592 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n2bxq"] Nov 24 17:52:15 crc kubenswrapper[4702]: I1124 17:52:15.654196 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" path="/var/lib/kubelet/pods/0b1e5e1b-4ea3-4481-9bdc-f24a7689775a/volumes" Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.046647 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.046919 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" podUID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" containerName="controller-manager" containerID="cri-o://068eb80856875cdea08073a228c0083e61c577ab1cc2d41f48629bea98f696b5" gracePeriod=30 Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.058335 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.058581 4702 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" podUID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" containerName="route-controller-manager" containerID="cri-o://996844dc42854f6c9c8e5686be24c1919eb90db2a918c7547c30d4f2e59dc131" gracePeriod=30 Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.820899 4702 generic.go:334] "Generic (PLEG): container finished" podID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" containerID="068eb80856875cdea08073a228c0083e61c577ab1cc2d41f48629bea98f696b5" exitCode=0 Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.820986 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" event={"ID":"6c41887a-53f2-42e0-9136-2d2f2d0716ea","Type":"ContainerDied","Data":"068eb80856875cdea08073a228c0083e61c577ab1cc2d41f48629bea98f696b5"} Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.823654 4702 generic.go:334] "Generic (PLEG): container finished" podID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" containerID="996844dc42854f6c9c8e5686be24c1919eb90db2a918c7547c30d4f2e59dc131" exitCode=0 Nov 24 17:52:18 crc kubenswrapper[4702]: I1124 17:52:18.823685 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" event={"ID":"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e","Type":"ContainerDied","Data":"996844dc42854f6c9c8e5686be24c1919eb90db2a918c7547c30d4f2e59dc131"} Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.140534 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166530 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-689557999b-w776z"] Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.166731 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="registry-server" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166747 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="registry-server" Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.166760 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerName="oauth-openshift" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166767 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerName="oauth-openshift" Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.166779 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="extract-utilities" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166785 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="extract-utilities" Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.166814 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="extract-content" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166820 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="extract-content" Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.166828 
4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" containerName="route-controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166834 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" containerName="route-controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166924 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" containerName="route-controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166936 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="12912be2-7861-4e86-9902-789cb8ae519f" containerName="registry-server" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.166945 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b1e5e1b-4ea3-4481-9bdc-f24a7689775a" containerName="oauth-openshift" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.167367 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.184728 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-689557999b-w776z"] Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.199675 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261397 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles\") pod \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261471 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdrlq\" (UniqueName: \"kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq\") pod \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261509 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert\") pod \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261565 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca\") pod \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261591 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert\") pod \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261618 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config\") pod \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261651 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlhzg\" (UniqueName: \"kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg\") pod \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261725 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config\") pod \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\" (UID: \"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261751 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca\") pod \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\" (UID: \"6c41887a-53f2-42e0-9136-2d2f2d0716ea\") " Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.261960 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwgft\" (UniqueName: \"kubernetes.io/projected/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-kube-api-access-bwgft\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.262094 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-config\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.262176 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-client-ca\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.262218 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-serving-cert\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.262741 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca" (OuterVolumeSpecName: "client-ca") pod "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" (UID: "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.262999 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6c41887a-53f2-42e0-9136-2d2f2d0716ea" (UID: "6c41887a-53f2-42e0-9136-2d2f2d0716ea"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.263155 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca" (OuterVolumeSpecName: "client-ca") pod "6c41887a-53f2-42e0-9136-2d2f2d0716ea" (UID: "6c41887a-53f2-42e0-9136-2d2f2d0716ea"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.263166 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config" (OuterVolumeSpecName: "config") pod "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" (UID: "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.263484 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config" (OuterVolumeSpecName: "config") pod "6c41887a-53f2-42e0-9136-2d2f2d0716ea" (UID: "6c41887a-53f2-42e0-9136-2d2f2d0716ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.269687 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg" (OuterVolumeSpecName: "kube-api-access-hlhzg") pod "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" (UID: "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e"). InnerVolumeSpecName "kube-api-access-hlhzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.269917 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6c41887a-53f2-42e0-9136-2d2f2d0716ea" (UID: "6c41887a-53f2-42e0-9136-2d2f2d0716ea"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.270789 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq" (OuterVolumeSpecName: "kube-api-access-pdrlq") pod "6c41887a-53f2-42e0-9136-2d2f2d0716ea" (UID: "6c41887a-53f2-42e0-9136-2d2f2d0716ea"). InnerVolumeSpecName "kube-api-access-pdrlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.270818 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" (UID: "9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364142 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-client-ca\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364234 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-serving-cert\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364263 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwgft\" (UniqueName: \"kubernetes.io/projected/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-kube-api-access-bwgft\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364315 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-config\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364407 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364421 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c41887a-53f2-42e0-9136-2d2f2d0716ea-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364431 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364459 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlhzg\" (UniqueName: \"kubernetes.io/projected/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-kube-api-access-hlhzg\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364470 4702 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364478 4702 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364487 4702 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/6c41887a-53f2-42e0-9136-2d2f2d0716ea-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364496 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdrlq\" (UniqueName: \"kubernetes.io/projected/6c41887a-53f2-42e0-9136-2d2f2d0716ea-kube-api-access-pdrlq\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.364504 4702 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.365943 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-client-ca\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.366129 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-config\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.367927 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-serving-cert\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.382350 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwgft\" (UniqueName: \"kubernetes.io/projected/7ecf25a9-1f0e-4903-886f-4a955b5b0a6c-kube-api-access-bwgft\") pod \"route-controller-manager-689557999b-w776z\" (UID: \"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c\") " pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.521693 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-64b5cd4787-lb4zp"] Nov 24 17:52:19 crc kubenswrapper[4702]: E1124 17:52:19.521955 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" containerName="controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.521969 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" containerName="controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.521967 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.522075 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" containerName="controller-manager" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.523494 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.527389 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.528009 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.528528 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.528778 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.529243 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.529421 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.529559 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.529703 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.530059 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.532554 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.532858 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.533160 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.537629 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64b5cd4787-lb4zp"] Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.540350 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.540638 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.545453 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668270 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " 
pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668594 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-router-certs\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668615 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-service-ca\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668634 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668661 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668683 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-dir\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668704 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668724 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-login\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668740 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668755 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668774 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-error\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668854 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpvqb\" (UniqueName: \"kubernetes.io/projected/1dee3091-ca93-44c2-b9c0-8421a53f0894-kube-api-access-bpvqb\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668912 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-policies\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.668934 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-session\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.691838 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-689557999b-w776z"] Nov 24 17:52:19 crc kubenswrapper[4702]: W1124 17:52:19.697733 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ecf25a9_1f0e_4903_886f_4a955b5b0a6c.slice/crio-2bd15a8ef961069e4830bf3988764b1cda47df1ad78ac648ce73f137d98eda76 WatchSource:0}: Error finding container 2bd15a8ef961069e4830bf3988764b1cda47df1ad78ac648ce73f137d98eda76: Status 404 returned error can't find the container with id 2bd15a8ef961069e4830bf3988764b1cda47df1ad78ac648ce73f137d98eda76 Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770155 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpvqb\" (UniqueName: \"kubernetes.io/projected/1dee3091-ca93-44c2-b9c0-8421a53f0894-kube-api-access-bpvqb\") pod 
\"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770214 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-policies\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770257 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-session\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770338 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770367 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-router-certs\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770392 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-service-ca\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770427 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770456 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770484 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-dir\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: 
\"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770508 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770531 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-login\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770549 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770570 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.770598 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-error\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.771086 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-policies\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.771322 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1dee3091-ca93-44c2-b9c0-8421a53f0894-audit-dir\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.771693 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " 
pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.772182 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-service-ca\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.772899 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775076 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-error\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775183 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-login\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775847 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775854 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775721 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775751 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-router-certs\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " 
pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.775591 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.776339 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/1dee3091-ca93-44c2-b9c0-8421a53f0894-v4-0-config-system-session\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.785337 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpvqb\" (UniqueName: \"kubernetes.io/projected/1dee3091-ca93-44c2-b9c0-8421a53f0894-kube-api-access-bpvqb\") pod \"oauth-openshift-64b5cd4787-lb4zp\" (UID: \"1dee3091-ca93-44c2-b9c0-8421a53f0894\") " pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.830608 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" event={"ID":"9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e","Type":"ContainerDied","Data":"22d8076e1839579ebc7e6e4544929abcb5c53bb44cdfddc169d66bc916919e8b"} Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.830665 4702 scope.go:117] "RemoveContainer" containerID="996844dc42854f6c9c8e5686be24c1919eb90db2a918c7547c30d4f2e59dc131" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.830674 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.832454 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" event={"ID":"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c","Type":"ContainerStarted","Data":"beea873e8b318e46c9a9a40147565a07f0fc7a242e42d32b030c84e964ec3c0c"} Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.832490 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" event={"ID":"7ecf25a9-1f0e-4903-886f-4a955b5b0a6c","Type":"ContainerStarted","Data":"2bd15a8ef961069e4830bf3988764b1cda47df1ad78ac648ce73f137d98eda76"} Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.832720 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.834209 4702 patch_prober.go:28] interesting pod/route-controller-manager-689557999b-w776z container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.56:8443/healthz\": dial tcp 10.217.0.56:8443: connect: connection refused" start-of-body= Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.834256 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" podUID="7ecf25a9-1f0e-4903-886f-4a955b5b0a6c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.56:8443/healthz\": dial tcp 10.217.0.56:8443: connect: connection refused" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.837780 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" event={"ID":"6c41887a-53f2-42e0-9136-2d2f2d0716ea","Type":"ContainerDied","Data":"d5cd2c2261e6f8d8d9e7ee276237bebf4b2ec187f4ca39a62dc5124be5b423ec"} Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.837983 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.840577 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.848563 4702 scope.go:117] "RemoveContainer" containerID="068eb80856875cdea08073a228c0083e61c577ab1cc2d41f48629bea98f696b5" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.856755 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" podStartSLOduration=1.85673528 podStartE2EDuration="1.85673528s" podCreationTimestamp="2025-11-24 17:52:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:19.852021153 +0000 UTC m=+229.092762327" watchObservedRunningTime="2025-11-24 17:52:19.85673528 +0000 UTC m=+229.097476444" Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.865325 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.872913 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77cddf497b-m6tgh"] Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.876171 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:52:19 crc kubenswrapper[4702]: I1124 17:52:19.879189 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7f7bf5bf79-chwfq"] Nov 24 17:52:20 crc kubenswrapper[4702]: I1124 17:52:20.021819 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64b5cd4787-lb4zp"] Nov 24 17:52:20 crc kubenswrapper[4702]: W1124 17:52:20.025874 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1dee3091_ca93_44c2_b9c0_8421a53f0894.slice/crio-472ac4b2dab47831f47e4d3d38518d6dbaaf4ed3fb5262e10f8385afcd2c97d7 WatchSource:0}: Error finding container 472ac4b2dab47831f47e4d3d38518d6dbaaf4ed3fb5262e10f8385afcd2c97d7: Status 404 returned error can't find the container with id 472ac4b2dab47831f47e4d3d38518d6dbaaf4ed3fb5262e10f8385afcd2c97d7 Nov 24 17:52:20 crc kubenswrapper[4702]: I1124 17:52:20.844294 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" event={"ID":"1dee3091-ca93-44c2-b9c0-8421a53f0894","Type":"ContainerStarted","Data":"88efa9fad97c0591c12553811ab09b6b05c80727022d9a66c41254b329c6a492"} Nov 24 17:52:20 crc kubenswrapper[4702]: I1124 17:52:20.844330 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" event={"ID":"1dee3091-ca93-44c2-b9c0-8421a53f0894","Type":"ContainerStarted","Data":"472ac4b2dab47831f47e4d3d38518d6dbaaf4ed3fb5262e10f8385afcd2c97d7"} Nov 24 17:52:20 crc kubenswrapper[4702]: I1124 17:52:20.848771 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-689557999b-w776z" Nov 24 17:52:20 crc kubenswrapper[4702]: I1124 17:52:20.864903 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" podStartSLOduration=32.864885205 podStartE2EDuration="32.864885205s" 
podCreationTimestamp="2025-11-24 17:51:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:20.861851837 +0000 UTC m=+230.102593021" watchObservedRunningTime="2025-11-24 17:52:20.864885205 +0000 UTC m=+230.105626379" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.521305 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c95d5c47f-98tn8"] Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.521942 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.524084 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.524523 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.524593 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.525197 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.525252 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.525443 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.529942 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.535758 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c95d5c47f-98tn8"] Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.591720 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-proxy-ca-bundles\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.591779 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b99vw\" (UniqueName: \"kubernetes.io/projected/c73195e3-a0bd-40d0-b111-b38043e48372-kube-api-access-b99vw\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.591932 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c73195e3-a0bd-40d0-b111-b38043e48372-serving-cert\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 
crc kubenswrapper[4702]: I1124 17:52:21.592001 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-client-ca\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.592166 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-config\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.654665 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c41887a-53f2-42e0-9136-2d2f2d0716ea" path="/var/lib/kubelet/pods/6c41887a-53f2-42e0-9136-2d2f2d0716ea/volumes" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.655346 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e" path="/var/lib/kubelet/pods/9b2f8b9d-c9ec-4f72-90aa-4c26ae468d0e/volumes" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.693304 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c73195e3-a0bd-40d0-b111-b38043e48372-serving-cert\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.693353 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-client-ca\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.693405 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-config\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.693428 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-proxy-ca-bundles\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.693451 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b99vw\" (UniqueName: \"kubernetes.io/projected/c73195e3-a0bd-40d0-b111-b38043e48372-kube-api-access-b99vw\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.694576 4702 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-client-ca\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.694626 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-proxy-ca-bundles\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.694989 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c73195e3-a0bd-40d0-b111-b38043e48372-config\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.699656 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c73195e3-a0bd-40d0-b111-b38043e48372-serving-cert\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.709711 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b99vw\" (UniqueName: \"kubernetes.io/projected/c73195e3-a0bd-40d0-b111-b38043e48372-kube-api-access-b99vw\") pod \"controller-manager-6c95d5c47f-98tn8\" (UID: \"c73195e3-a0bd-40d0-b111-b38043e48372\") " pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.841604 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.849840 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:21 crc kubenswrapper[4702]: I1124 17:52:21.854818 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-64b5cd4787-lb4zp" Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.039528 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c95d5c47f-98tn8"] Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.857263 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" event={"ID":"c73195e3-a0bd-40d0-b111-b38043e48372","Type":"ContainerStarted","Data":"b71ccb99e265fdc7efd8b31921ef3ac2d0ba69834e256c1abdf94f3376ef4792"} Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.857573 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" event={"ID":"c73195e3-a0bd-40d0-b111-b38043e48372","Type":"ContainerStarted","Data":"c1f5e17835c1f9a46bfc4ad157ced1af0af50aa98f722cad97537c2f1873b516"} Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.857593 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.863903 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" Nov 24 17:52:22 crc kubenswrapper[4702]: I1124 17:52:22.876541 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c95d5c47f-98tn8" podStartSLOduration=4.876525982 podStartE2EDuration="4.876525982s" podCreationTimestamp="2025-11-24 17:52:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:22.873511444 +0000 UTC m=+232.114252618" watchObservedRunningTime="2025-11-24 17:52:22.876525982 +0000 UTC m=+232.117267146" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.311360 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cccg8"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.312247 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cccg8" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="registry-server" containerID="cri-o://69fe9ee27049121e36be5a7ae25b092d8f64d1ab84891496ae3d6bb5fdbda19b" gracePeriod=30 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.321216 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.323785 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vt8ck" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="registry-server" containerID="cri-o://d45a41aed6f8ef2791f5587526bdb205e5670b9da8c884c36cfd03959788c320" gracePeriod=30 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.332599 4702 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.332820 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" containerID="cri-o://066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8" gracePeriod=30 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.343255 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.343496 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5xbxf" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="registry-server" containerID="cri-o://06c84f714475a4e00be5021d3ce2d7f91f5ae759b374f698e516f726b5de846c" gracePeriod=30 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.355943 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.356178 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wjgnw" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="registry-server" containerID="cri-o://144471d1e14b5cbe6d829ab9754de2d2ce5d7ba7660e91303b48dda2401e7ac6" gracePeriod=30 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.360866 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9fvdq"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.361619 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.377416 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9fvdq"] Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.486284 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzhps\" (UniqueName: \"kubernetes.io/projected/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-kube-api-access-rzhps\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.486375 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.486408 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.588298 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.588358 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.588394 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzhps\" (UniqueName: \"kubernetes.io/projected/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-kube-api-access-rzhps\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.589734 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.594239 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.604229 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzhps\" (UniqueName: \"kubernetes.io/projected/687de2b1-fcda-4c00-a295-1b5ee7ef64c2-kube-api-access-rzhps\") pod \"marketplace-operator-79b997595-9fvdq\" (UID: \"687de2b1-fcda-4c00-a295-1b5ee7ef64c2\") " pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.777352 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.824241 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.894227 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca\") pod \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.894270 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics\") pod \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.894347 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4k7v\" (UniqueName: \"kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v\") pod \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\" (UID: \"206b06d8-9020-4e3f-b055-1a1bb10b0bcd\") " Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.896224 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "206b06d8-9020-4e3f-b055-1a1bb10b0bcd" (UID: "206b06d8-9020-4e3f-b055-1a1bb10b0bcd"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.903491 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "206b06d8-9020-4e3f-b055-1a1bb10b0bcd" (UID: "206b06d8-9020-4e3f-b055-1a1bb10b0bcd"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.904231 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v" (OuterVolumeSpecName: "kube-api-access-r4k7v") pod "206b06d8-9020-4e3f-b055-1a1bb10b0bcd" (UID: "206b06d8-9020-4e3f-b055-1a1bb10b0bcd"). 
InnerVolumeSpecName "kube-api-access-r4k7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.935940 4702 generic.go:334] "Generic (PLEG): container finished" podID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerID="d45a41aed6f8ef2791f5587526bdb205e5670b9da8c884c36cfd03959788c320" exitCode=0 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.936015 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerDied","Data":"d45a41aed6f8ef2791f5587526bdb205e5670b9da8c884c36cfd03959788c320"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.943874 4702 generic.go:334] "Generic (PLEG): container finished" podID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerID="066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8" exitCode=0 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.943953 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" event={"ID":"206b06d8-9020-4e3f-b055-1a1bb10b0bcd","Type":"ContainerDied","Data":"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.943985 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" event={"ID":"206b06d8-9020-4e3f-b055-1a1bb10b0bcd","Type":"ContainerDied","Data":"b3633835317117a9bc6522ab889c2d5eac9aa1acf2d127b09f41bcbbd80d1b8d"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.944008 4702 scope.go:117] "RemoveContainer" containerID="066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.944045 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zs4n4" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.951285 4702 generic.go:334] "Generic (PLEG): container finished" podID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerID="06c84f714475a4e00be5021d3ce2d7f91f5ae759b374f698e516f726b5de846c" exitCode=0 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.951350 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerDied","Data":"06c84f714475a4e00be5021d3ce2d7f91f5ae759b374f698e516f726b5de846c"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.959207 4702 generic.go:334] "Generic (PLEG): container finished" podID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerID="144471d1e14b5cbe6d829ab9754de2d2ce5d7ba7660e91303b48dda2401e7ac6" exitCode=0 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.959272 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerDied","Data":"144471d1e14b5cbe6d829ab9754de2d2ce5d7ba7660e91303b48dda2401e7ac6"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.963273 4702 scope.go:117] "RemoveContainer" containerID="066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8" Nov 24 17:52:37 crc kubenswrapper[4702]: E1124 17:52:37.964227 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8\": container with ID starting with 066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8 not found: ID does not exist" containerID="066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.964272 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8"} err="failed to get container status \"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8\": rpc error: code = NotFound desc = could not find container \"066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8\": container with ID starting with 066d8cff1e33c703953194d5d7fdbb8e9137b99c68fae73e15c6e568b00030d8 not found: ID does not exist" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.978407 4702 generic.go:334] "Generic (PLEG): container finished" podID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerID="69fe9ee27049121e36be5a7ae25b092d8f64d1ab84891496ae3d6bb5fdbda19b" exitCode=0 Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.978453 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerDied","Data":"69fe9ee27049121e36be5a7ae25b092d8f64d1ab84891496ae3d6bb5fdbda19b"} Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.996942 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4k7v\" (UniqueName: \"kubernetes.io/projected/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-kube-api-access-r4k7v\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.996979 4702 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.996990 4702 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/206b06d8-9020-4e3f-b055-1a1bb10b0bcd-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:37 crc kubenswrapper[4702]: I1124 17:52:37.998003 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.004087 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zs4n4"] Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.015833 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.043724 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.056181 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.086135 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097726 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6slx9\" (UniqueName: \"kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9\") pod \"74f9355e-0937-46d3-892a-a2eba2be98d6\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097781 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities\") pod \"9f0763a6-8199-42ee-8388-0b10e1061fa2\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097828 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content\") pod \"9f0763a6-8199-42ee-8388-0b10e1061fa2\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097847 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content\") pod \"74f9355e-0937-46d3-892a-a2eba2be98d6\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097914 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqpwf\" (UniqueName: \"kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf\") pod \"9f0763a6-8199-42ee-8388-0b10e1061fa2\" (UID: \"9f0763a6-8199-42ee-8388-0b10e1061fa2\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.097949 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities\") pod \"74f9355e-0937-46d3-892a-a2eba2be98d6\" (UID: \"74f9355e-0937-46d3-892a-a2eba2be98d6\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.099137 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities" (OuterVolumeSpecName: "utilities") pod "74f9355e-0937-46d3-892a-a2eba2be98d6" (UID: "74f9355e-0937-46d3-892a-a2eba2be98d6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.102034 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities" (OuterVolumeSpecName: "utilities") pod "9f0763a6-8199-42ee-8388-0b10e1061fa2" (UID: "9f0763a6-8199-42ee-8388-0b10e1061fa2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.106052 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf" (OuterVolumeSpecName: "kube-api-access-pqpwf") pod "9f0763a6-8199-42ee-8388-0b10e1061fa2" (UID: "9f0763a6-8199-42ee-8388-0b10e1061fa2"). InnerVolumeSpecName "kube-api-access-pqpwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.106165 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9" (OuterVolumeSpecName: "kube-api-access-6slx9") pod "74f9355e-0937-46d3-892a-a2eba2be98d6" (UID: "74f9355e-0937-46d3-892a-a2eba2be98d6"). InnerVolumeSpecName "kube-api-access-6slx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.177363 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74f9355e-0937-46d3-892a-a2eba2be98d6" (UID: "74f9355e-0937-46d3-892a-a2eba2be98d6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199252 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptxd6\" (UniqueName: \"kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6\") pod \"fc76efe2-8f4e-49be-b747-865d5aa98156\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199297 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content\") pod \"c65d2141-2672-4f1c-a600-38ccf4af357d\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199322 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lphsk\" (UniqueName: \"kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk\") pod \"c65d2141-2672-4f1c-a600-38ccf4af357d\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199369 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities\") pod \"c65d2141-2672-4f1c-a600-38ccf4af357d\" (UID: \"c65d2141-2672-4f1c-a600-38ccf4af357d\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199388 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content\") pod \"fc76efe2-8f4e-49be-b747-865d5aa98156\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199429 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities\") pod \"fc76efe2-8f4e-49be-b747-865d5aa98156\" (UID: \"fc76efe2-8f4e-49be-b747-865d5aa98156\") " Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199620 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199631 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6slx9\" (UniqueName: \"kubernetes.io/projected/74f9355e-0937-46d3-892a-a2eba2be98d6-kube-api-access-6slx9\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199641 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199649 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74f9355e-0937-46d3-892a-a2eba2be98d6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.199657 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqpwf\" (UniqueName: \"kubernetes.io/projected/9f0763a6-8199-42ee-8388-0b10e1061fa2-kube-api-access-pqpwf\") on node \"crc\" DevicePath \"\"" Nov 24 
17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.200348 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities" (OuterVolumeSpecName: "utilities") pod "fc76efe2-8f4e-49be-b747-865d5aa98156" (UID: "fc76efe2-8f4e-49be-b747-865d5aa98156"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.200930 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities" (OuterVolumeSpecName: "utilities") pod "c65d2141-2672-4f1c-a600-38ccf4af357d" (UID: "c65d2141-2672-4f1c-a600-38ccf4af357d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.202572 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6" (OuterVolumeSpecName: "kube-api-access-ptxd6") pod "fc76efe2-8f4e-49be-b747-865d5aa98156" (UID: "fc76efe2-8f4e-49be-b747-865d5aa98156"). InnerVolumeSpecName "kube-api-access-ptxd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.203451 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk" (OuterVolumeSpecName: "kube-api-access-lphsk") pod "c65d2141-2672-4f1c-a600-38ccf4af357d" (UID: "c65d2141-2672-4f1c-a600-38ccf4af357d"). InnerVolumeSpecName "kube-api-access-lphsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.218569 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c65d2141-2672-4f1c-a600-38ccf4af357d" (UID: "c65d2141-2672-4f1c-a600-38ccf4af357d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.219489 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9f0763a6-8199-42ee-8388-0b10e1061fa2" (UID: "9f0763a6-8199-42ee-8388-0b10e1061fa2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.251647 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc76efe2-8f4e-49be-b747-865d5aa98156" (UID: "fc76efe2-8f4e-49be-b747-865d5aa98156"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.289403 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9fvdq"] Nov 24 17:52:38 crc kubenswrapper[4702]: W1124 17:52:38.293264 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod687de2b1_fcda_4c00_a295_1b5ee7ef64c2.slice/crio-c21059a65c8d86a288c07bcc8e1a823aaa96fc98b9460b0d079bd770050ff494 WatchSource:0}: Error finding container c21059a65c8d86a288c07bcc8e1a823aaa96fc98b9460b0d079bd770050ff494: Status 404 returned error can't find the container with id c21059a65c8d86a288c07bcc8e1a823aaa96fc98b9460b0d079bd770050ff494 Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301129 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301154 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301164 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc76efe2-8f4e-49be-b747-865d5aa98156-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301173 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f0763a6-8199-42ee-8388-0b10e1061fa2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301182 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c65d2141-2672-4f1c-a600-38ccf4af357d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301190 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptxd6\" (UniqueName: \"kubernetes.io/projected/fc76efe2-8f4e-49be-b747-865d5aa98156-kube-api-access-ptxd6\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.301200 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lphsk\" (UniqueName: \"kubernetes.io/projected/c65d2141-2672-4f1c-a600-38ccf4af357d-kube-api-access-lphsk\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.985407 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xbxf" event={"ID":"c65d2141-2672-4f1c-a600-38ccf4af357d","Type":"ContainerDied","Data":"5c78987b416f1fd711893eb59ab3e574d284cb9ddb852c392a968c89b3773c81"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.985772 4702 scope.go:117] "RemoveContainer" containerID="06c84f714475a4e00be5021d3ce2d7f91f5ae759b374f698e516f726b5de846c" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.985443 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xbxf" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.988076 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjgnw" event={"ID":"9f0763a6-8199-42ee-8388-0b10e1061fa2","Type":"ContainerDied","Data":"06b4f9916444ea9fa2a3bfc97f8e0f66ec0ab18ba4d87423dc1fa3f046799420"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.988127 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wjgnw" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.989589 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" event={"ID":"687de2b1-fcda-4c00-a295-1b5ee7ef64c2","Type":"ContainerStarted","Data":"0105363609a369095e30981885ac906487ba79a0ecebedefaabcdf5864b5cd3d"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.989610 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" event={"ID":"687de2b1-fcda-4c00-a295-1b5ee7ef64c2","Type":"ContainerStarted","Data":"c21059a65c8d86a288c07bcc8e1a823aaa96fc98b9460b0d079bd770050ff494"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.990326 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.993936 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cccg8" event={"ID":"fc76efe2-8f4e-49be-b747-865d5aa98156","Type":"ContainerDied","Data":"6b38eb2bc1c708eb3931fa06a906d2c8693331b04bf004837ce0bd198ec541aa"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.994118 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cccg8" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.996960 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.999152 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt8ck" event={"ID":"74f9355e-0937-46d3-892a-a2eba2be98d6","Type":"ContainerDied","Data":"647c1ef798f33a5be7fa9bb59ba0e78934affd8388a3541089d4accd5249edd5"} Nov 24 17:52:38 crc kubenswrapper[4702]: I1124 17:52:38.999235 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vt8ck" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.001024 4702 scope.go:117] "RemoveContainer" containerID="cdd8aa3833a2c3296567333c0f0423569277fb87f520973748f48d1ebc00af1a" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.015863 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9fvdq" podStartSLOduration=2.015832568 podStartE2EDuration="2.015832568s" podCreationTimestamp="2025-11-24 17:52:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:52:39.011628595 +0000 UTC m=+248.252369789" watchObservedRunningTime="2025-11-24 17:52:39.015832568 +0000 UTC m=+248.256573732" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.025357 4702 scope.go:117] "RemoveContainer" containerID="140156c9d61fcf2a845ade49045dc11c6ab62634afd4625ff9b819ef1e888ef2" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.045497 4702 scope.go:117] "RemoveContainer" containerID="144471d1e14b5cbe6d829ab9754de2d2ce5d7ba7660e91303b48dda2401e7ac6" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.052782 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.057597 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wjgnw"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.068566 4702 scope.go:117] "RemoveContainer" containerID="70cc1a7b380fa6011d51df60863bc0134383fa456941aa8f35e498df68762c01" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.077443 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.079318 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xbxf"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.082742 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.092913 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vt8ck"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.093707 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cccg8"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.097510 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cccg8"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.102114 4702 scope.go:117] "RemoveContainer" containerID="81b810d75212b2a2622625ce8710cfd75b5f6cf1182f59593a2855442517db09" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.113948 4702 scope.go:117] "RemoveContainer" containerID="69fe9ee27049121e36be5a7ae25b092d8f64d1ab84891496ae3d6bb5fdbda19b" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.127705 4702 scope.go:117] "RemoveContainer" containerID="172d65e1f1f4ce47e917c45d5b66805802f6319ff6efdf5277cbbd6e88f80292" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.139020 4702 scope.go:117] "RemoveContainer" containerID="7fb71b73f1f908237e2896d3c2dd3beea7807c5d54f7324b94826d2b31a37ed8" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.149846 4702 
scope.go:117] "RemoveContainer" containerID="d45a41aed6f8ef2791f5587526bdb205e5670b9da8c884c36cfd03959788c320" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.160446 4702 scope.go:117] "RemoveContainer" containerID="66608f07fc1fb34977a249961820f817413738c1090844ae0595839490ade88e" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.173062 4702 scope.go:117] "RemoveContainer" containerID="588a1beb8c38b9014a06e593fb5645e592d26feb587298d987d3a92bc6903c9f" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.530851 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qnt56"] Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531097 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531112 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531122 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531130 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531138 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531145 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531157 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531166 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531176 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531183 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531192 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531200 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531212 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531220 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531232 4702 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531239 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531250 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531257 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531266 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531274 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531284 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531291 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531302 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531309 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="extract-utilities" Nov 24 17:52:39 crc kubenswrapper[4702]: E1124 17:52:39.531320 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531326 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="extract-content" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531432 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531450 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531461 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531473 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" containerName="marketplace-operator" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.531486 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" containerName="registry-server" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.532311 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.534288 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.546190 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnt56"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.626897 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-catalog-content\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.626963 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtn2s\" (UniqueName: \"kubernetes.io/projected/8603ba78-8783-40be-b184-26376b1a6e9e-kube-api-access-rtn2s\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.627026 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-utilities\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.658273 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="206b06d8-9020-4e3f-b055-1a1bb10b0bcd" path="/var/lib/kubelet/pods/206b06d8-9020-4e3f-b055-1a1bb10b0bcd/volumes" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.658936 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74f9355e-0937-46d3-892a-a2eba2be98d6" path="/var/lib/kubelet/pods/74f9355e-0937-46d3-892a-a2eba2be98d6/volumes" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.659886 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f0763a6-8199-42ee-8388-0b10e1061fa2" path="/var/lib/kubelet/pods/9f0763a6-8199-42ee-8388-0b10e1061fa2/volumes" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.661335 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c65d2141-2672-4f1c-a600-38ccf4af357d" path="/var/lib/kubelet/pods/c65d2141-2672-4f1c-a600-38ccf4af357d/volumes" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.663235 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc76efe2-8f4e-49be-b747-865d5aa98156" path="/var/lib/kubelet/pods/fc76efe2-8f4e-49be-b747-865d5aa98156/volumes" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.728428 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtn2s\" (UniqueName: \"kubernetes.io/projected/8603ba78-8783-40be-b184-26376b1a6e9e-kube-api-access-rtn2s\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.728540 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-utilities\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.728680 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-catalog-content\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.729238 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-catalog-content\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.732215 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8603ba78-8783-40be-b184-26376b1a6e9e-utilities\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.739200 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sdfgt"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.740305 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.741869 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.752830 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdfgt"] Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.764503 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtn2s\" (UniqueName: \"kubernetes.io/projected/8603ba78-8783-40be-b184-26376b1a6e9e-kube-api-access-rtn2s\") pod \"redhat-marketplace-qnt56\" (UID: \"8603ba78-8783-40be-b184-26376b1a6e9e\") " pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.829737 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-utilities\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.829780 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-catalog-content\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.829972 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwxcc\" (UniqueName: 
\"kubernetes.io/projected/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-kube-api-access-bwxcc\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.844261 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.931534 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwxcc\" (UniqueName: \"kubernetes.io/projected/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-kube-api-access-bwxcc\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.931590 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-utilities\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.931610 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-catalog-content\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.932135 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-catalog-content\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.932340 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-utilities\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:39 crc kubenswrapper[4702]: I1124 17:52:39.955012 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwxcc\" (UniqueName: \"kubernetes.io/projected/546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7-kube-api-access-bwxcc\") pod \"redhat-operators-sdfgt\" (UID: \"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7\") " pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:40 crc kubenswrapper[4702]: I1124 17:52:40.052135 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sdfgt" Nov 24 17:52:40 crc kubenswrapper[4702]: I1124 17:52:40.230752 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnt56"] Nov 24 17:52:40 crc kubenswrapper[4702]: W1124 17:52:40.235127 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8603ba78_8783_40be_b184_26376b1a6e9e.slice/crio-f6e9fa392c0021447a05c57ff3fb1b8c82ccd815a454a5718c6457cbb465a7ad WatchSource:0}: Error finding container f6e9fa392c0021447a05c57ff3fb1b8c82ccd815a454a5718c6457cbb465a7ad: Status 404 returned error can't find the container with id f6e9fa392c0021447a05c57ff3fb1b8c82ccd815a454a5718c6457cbb465a7ad Nov 24 17:52:40 crc kubenswrapper[4702]: I1124 17:52:40.424514 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdfgt"] Nov 24 17:52:40 crc kubenswrapper[4702]: W1124 17:52:40.431309 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod546f6b1b_0fd6_48ee_bce9_08ecb6cfa3a7.slice/crio-5d226c76377a132342af838bd62147a3a38556ed0820afc65e91f3022ee4e04f WatchSource:0}: Error finding container 5d226c76377a132342af838bd62147a3a38556ed0820afc65e91f3022ee4e04f: Status 404 returned error can't find the container with id 5d226c76377a132342af838bd62147a3a38556ed0820afc65e91f3022ee4e04f Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.016173 4702 generic.go:334] "Generic (PLEG): container finished" podID="8603ba78-8783-40be-b184-26376b1a6e9e" containerID="7c5b0e5bb25b878a212f3ea1f70b8436e83be04a49676e2494cb8eae097a9eb3" exitCode=0 Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.016237 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnt56" event={"ID":"8603ba78-8783-40be-b184-26376b1a6e9e","Type":"ContainerDied","Data":"7c5b0e5bb25b878a212f3ea1f70b8436e83be04a49676e2494cb8eae097a9eb3"} Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.016319 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnt56" event={"ID":"8603ba78-8783-40be-b184-26376b1a6e9e","Type":"ContainerStarted","Data":"f6e9fa392c0021447a05c57ff3fb1b8c82ccd815a454a5718c6457cbb465a7ad"} Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.017661 4702 generic.go:334] "Generic (PLEG): container finished" podID="546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7" containerID="3b2f47747a39af76a4fdd881c80ab911f3b51c24854ea92999bd932a35de4334" exitCode=0 Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.017699 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdfgt" event={"ID":"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7","Type":"ContainerDied","Data":"3b2f47747a39af76a4fdd881c80ab911f3b51c24854ea92999bd932a35de4334"} Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.017745 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdfgt" event={"ID":"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7","Type":"ContainerStarted","Data":"5d226c76377a132342af838bd62147a3a38556ed0820afc65e91f3022ee4e04f"} Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.933000 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z67lw"] Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.934389 4702 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.937458 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 24 17:52:41 crc kubenswrapper[4702]: I1124 17:52:41.946586 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z67lw"]
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.054735 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-catalog-content\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.054843 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfpv2\" (UniqueName: \"kubernetes.io/projected/4a27a95c-78d8-428d-8437-2469bee7ddad-kube-api-access-sfpv2\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.055113 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-utilities\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.132332 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xt5hs"]
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.133289 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.135333 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.143314 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xt5hs"]
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.156257 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfpv2\" (UniqueName: \"kubernetes.io/projected/4a27a95c-78d8-428d-8437-2469bee7ddad-kube-api-access-sfpv2\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.156344 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-utilities\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.156376 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-catalog-content\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.156850 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-catalog-content\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.157509 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a27a95c-78d8-428d-8437-2469bee7ddad-utilities\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.177341 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfpv2\" (UniqueName: \"kubernetes.io/projected/4a27a95c-78d8-428d-8437-2469bee7ddad-kube-api-access-sfpv2\") pod \"community-operators-z67lw\" (UID: \"4a27a95c-78d8-428d-8437-2469bee7ddad\") " pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.256977 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.257252 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpdvr\" (UniqueName: \"kubernetes.io/projected/806486ea-2ebb-4171-915c-69170eaf3967-kube-api-access-zpdvr\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.257323 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-catalog-content\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.257353 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-utilities\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.358666 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-catalog-content\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.358715 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-utilities\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.358767 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpdvr\" (UniqueName: \"kubernetes.io/projected/806486ea-2ebb-4171-915c-69170eaf3967-kube-api-access-zpdvr\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.359659 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-catalog-content\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.359875 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/806486ea-2ebb-4171-915c-69170eaf3967-utilities\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.387827 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpdvr\" (UniqueName: \"kubernetes.io/projected/806486ea-2ebb-4171-915c-69170eaf3967-kube-api-access-zpdvr\") pod \"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs"
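[Editor's note] The two "empty-dir" volumes and the projected "kube-api-access-*" volume being attached and mounted above are the usual shape for an OLM catalog pod: scratch space plus a kubelet-projected service-account token. A minimal Go sketch of those volume definitions, reusing the names from the log but with every other field assumed rather than taken from the cluster's manifests:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Kubelet-projected tokens default to roughly one hour; an assumption, not from the log.
	expiration := int64(3600)
	volumes := []corev1.Volume{
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{
			// "kube-api-access-*" volumes are projected service-account token volumes.
			Name: "kube-api-access-sfpv2",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: []corev1.VolumeProjection{{
						ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
							Path:              "token",
							ExpirationSeconds: &expiration,
						},
					}},
				},
			},
		},
	}
	fmt.Printf("%d volumes, one per VerifyControllerAttachedVolume entry above\n", len(volumes))
}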
\"certified-operators-xt5hs\" (UID: \"806486ea-2ebb-4171-915c-69170eaf3967\") " pod="openshift-marketplace/certified-operators-xt5hs" Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.448615 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xt5hs" Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.701247 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z67lw"] Nov 24 17:52:42 crc kubenswrapper[4702]: W1124 17:52:42.723622 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a27a95c_78d8_428d_8437_2469bee7ddad.slice/crio-2ba6bbc970a39b368054f50c2f9f1603e9d1a0a0b5520d2d34e3525e1fbe1720 WatchSource:0}: Error finding container 2ba6bbc970a39b368054f50c2f9f1603e9d1a0a0b5520d2d34e3525e1fbe1720: Status 404 returned error can't find the container with id 2ba6bbc970a39b368054f50c2f9f1603e9d1a0a0b5520d2d34e3525e1fbe1720 Nov 24 17:52:42 crc kubenswrapper[4702]: I1124 17:52:42.825435 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xt5hs"] Nov 24 17:52:42 crc kubenswrapper[4702]: W1124 17:52:42.830765 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod806486ea_2ebb_4171_915c_69170eaf3967.slice/crio-c5ab55cf13b60cca38c4040ba23c6408d99325f2b4f52c0b520d52640d2f485a WatchSource:0}: Error finding container c5ab55cf13b60cca38c4040ba23c6408d99325f2b4f52c0b520d52640d2f485a: Status 404 returned error can't find the container with id c5ab55cf13b60cca38c4040ba23c6408d99325f2b4f52c0b520d52640d2f485a Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.027840 4702 generic.go:334] "Generic (PLEG): container finished" podID="8603ba78-8783-40be-b184-26376b1a6e9e" containerID="06978755eae30915f2263d877df4c97ffc0dee45f1be5e8517bd73d0088cd7e7" exitCode=0 Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.027976 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnt56" event={"ID":"8603ba78-8783-40be-b184-26376b1a6e9e","Type":"ContainerDied","Data":"06978755eae30915f2263d877df4c97ffc0dee45f1be5e8517bd73d0088cd7e7"} Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.032395 4702 generic.go:334] "Generic (PLEG): container finished" podID="806486ea-2ebb-4171-915c-69170eaf3967" containerID="3ed2e08c210d620a25a28368e9d52287a67a92ba6df59c2ebce8e834e2d79ee5" exitCode=0 Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.032448 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xt5hs" event={"ID":"806486ea-2ebb-4171-915c-69170eaf3967","Type":"ContainerDied","Data":"3ed2e08c210d620a25a28368e9d52287a67a92ba6df59c2ebce8e834e2d79ee5"} Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.032471 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xt5hs" event={"ID":"806486ea-2ebb-4171-915c-69170eaf3967","Type":"ContainerStarted","Data":"c5ab55cf13b60cca38c4040ba23c6408d99325f2b4f52c0b520d52640d2f485a"} Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.037449 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdfgt" event={"ID":"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7","Type":"ContainerDied","Data":"7e6fc33a6d3bb20fd6550964c7e2b5ddc0c24f4c640c428c930043a276f849be"} Nov 24 
17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.036996 4702 generic.go:334] "Generic (PLEG): container finished" podID="546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7" containerID="7e6fc33a6d3bb20fd6550964c7e2b5ddc0c24f4c640c428c930043a276f849be" exitCode=0 Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.043174 4702 generic.go:334] "Generic (PLEG): container finished" podID="4a27a95c-78d8-428d-8437-2469bee7ddad" containerID="2296bf8a07f23f0a7d3771d257c836c74d0e96e5f6df9263d46b589e7d8795a1" exitCode=0 Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.043212 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z67lw" event={"ID":"4a27a95c-78d8-428d-8437-2469bee7ddad","Type":"ContainerDied","Data":"2296bf8a07f23f0a7d3771d257c836c74d0e96e5f6df9263d46b589e7d8795a1"} Nov 24 17:52:43 crc kubenswrapper[4702]: I1124 17:52:43.043237 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z67lw" event={"ID":"4a27a95c-78d8-428d-8437-2469bee7ddad","Type":"ContainerStarted","Data":"2ba6bbc970a39b368054f50c2f9f1603e9d1a0a0b5520d2d34e3525e1fbe1720"} Nov 24 17:52:44 crc kubenswrapper[4702]: I1124 17:52:44.055168 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xt5hs" event={"ID":"806486ea-2ebb-4171-915c-69170eaf3967","Type":"ContainerStarted","Data":"5420266b4eb6868d65cd067f09c3f5fb5e40a382f66163b819fbb5c067531475"} Nov 24 17:52:44 crc kubenswrapper[4702]: I1124 17:52:44.060365 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdfgt" event={"ID":"546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7","Type":"ContainerStarted","Data":"57523d3d0860d4ed688024f1d722c09394a371f05b73e2c952153b03392d03b3"} Nov 24 17:52:44 crc kubenswrapper[4702]: I1124 17:52:44.087170 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sdfgt" podStartSLOduration=2.301797486 podStartE2EDuration="5.087147967s" podCreationTimestamp="2025-11-24 17:52:39 +0000 UTC" firstStartedPulling="2025-11-24 17:52:41.019095184 +0000 UTC m=+250.259836348" lastFinishedPulling="2025-11-24 17:52:43.804445655 +0000 UTC m=+253.045186829" observedRunningTime="2025-11-24 17:52:44.087142227 +0000 UTC m=+253.327883391" watchObservedRunningTime="2025-11-24 17:52:44.087147967 +0000 UTC m=+253.327889131" Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.067208 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnt56" event={"ID":"8603ba78-8783-40be-b184-26376b1a6e9e","Type":"ContainerStarted","Data":"0bfd76d0b1f307b6e729bf40de09b3793b3ac58150cf1ec6a25acd0657ab20ca"} Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.069522 4702 generic.go:334] "Generic (PLEG): container finished" podID="806486ea-2ebb-4171-915c-69170eaf3967" containerID="5420266b4eb6868d65cd067f09c3f5fb5e40a382f66163b819fbb5c067531475" exitCode=0 Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.069550 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xt5hs" event={"ID":"806486ea-2ebb-4171-915c-69170eaf3967","Type":"ContainerDied","Data":"5420266b4eb6868d65cd067f09c3f5fb5e40a382f66163b819fbb5c067531475"} Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.072213 4702 generic.go:334] "Generic (PLEG): container finished" podID="4a27a95c-78d8-428d-8437-2469bee7ddad" 
containerID="1d1d49e9a01f1ebae1ee0a1561520a0a46feba33c477b9da2812a3fe0e39e608" exitCode=0 Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.072534 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z67lw" event={"ID":"4a27a95c-78d8-428d-8437-2469bee7ddad","Type":"ContainerDied","Data":"1d1d49e9a01f1ebae1ee0a1561520a0a46feba33c477b9da2812a3fe0e39e608"} Nov 24 17:52:45 crc kubenswrapper[4702]: I1124 17:52:45.083890 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qnt56" podStartSLOduration=3.075278077 podStartE2EDuration="6.08387183s" podCreationTimestamp="2025-11-24 17:52:39 +0000 UTC" firstStartedPulling="2025-11-24 17:52:41.017566799 +0000 UTC m=+250.258307963" lastFinishedPulling="2025-11-24 17:52:44.026160552 +0000 UTC m=+253.266901716" observedRunningTime="2025-11-24 17:52:45.081853181 +0000 UTC m=+254.322594365" watchObservedRunningTime="2025-11-24 17:52:45.08387183 +0000 UTC m=+254.324613004" Nov 24 17:52:47 crc kubenswrapper[4702]: I1124 17:52:47.086171 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z67lw" event={"ID":"4a27a95c-78d8-428d-8437-2469bee7ddad","Type":"ContainerStarted","Data":"1c4d55ff2935fbe9b51e1ef0c957278c36606e20c84e3650ce2fbd6932f03cd0"} Nov 24 17:52:47 crc kubenswrapper[4702]: I1124 17:52:47.087784 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xt5hs" event={"ID":"806486ea-2ebb-4171-915c-69170eaf3967","Type":"ContainerStarted","Data":"e1f5414c4608d229ed5cabf4b136eedf2bdecbf5dfb6ff34b9ea6149dc455203"} Nov 24 17:52:47 crc kubenswrapper[4702]: I1124 17:52:47.105023 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z67lw" podStartSLOduration=3.454063969 podStartE2EDuration="6.105006862s" podCreationTimestamp="2025-11-24 17:52:41 +0000 UTC" firstStartedPulling="2025-11-24 17:52:43.045350807 +0000 UTC m=+252.286091971" lastFinishedPulling="2025-11-24 17:52:45.6962937 +0000 UTC m=+254.937034864" observedRunningTime="2025-11-24 17:52:47.104176908 +0000 UTC m=+256.344918092" watchObservedRunningTime="2025-11-24 17:52:47.105006862 +0000 UTC m=+256.345748026" Nov 24 17:52:47 crc kubenswrapper[4702]: I1124 17:52:47.122003 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xt5hs" podStartSLOduration=2.5236221260000002 podStartE2EDuration="5.121987072s" podCreationTimestamp="2025-11-24 17:52:42 +0000 UTC" firstStartedPulling="2025-11-24 17:52:43.035050653 +0000 UTC m=+252.275791817" lastFinishedPulling="2025-11-24 17:52:45.633415599 +0000 UTC m=+254.874156763" observedRunningTime="2025-11-24 17:52:47.120605101 +0000 UTC m=+256.361346275" watchObservedRunningTime="2025-11-24 17:52:47.121987072 +0000 UTC m=+256.362728236" Nov 24 17:52:49 crc kubenswrapper[4702]: I1124 17:52:49.845168 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:49 crc kubenswrapper[4702]: I1124 17:52:49.845391 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:49 crc kubenswrapper[4702]: I1124 17:52:49.889030 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qnt56" Nov 24 17:52:50 crc 
Nov 24 17:52:50 crc kubenswrapper[4702]: I1124 17:52:50.052781 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sdfgt"
Nov 24 17:52:50 crc kubenswrapper[4702]: I1124 17:52:50.052850 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sdfgt"
Nov 24 17:52:50 crc kubenswrapper[4702]: I1124 17:52:50.095946 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sdfgt"
Nov 24 17:52:50 crc kubenswrapper[4702]: I1124 17:52:50.136626 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qnt56"
Nov 24 17:52:50 crc kubenswrapper[4702]: I1124 17:52:50.139862 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sdfgt"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.258548 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.258620 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.295233 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.449726 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.449790 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:52 crc kubenswrapper[4702]: I1124 17:52:52.486089 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:53 crc kubenswrapper[4702]: I1124 17:52:53.157403 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xt5hs"
Nov 24 17:52:53 crc kubenswrapper[4702]: I1124 17:52:53.160967 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z67lw"
Nov 24 17:54:22 crc kubenswrapper[4702]: I1124 17:54:22.482670 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:54:22 crc kubenswrapper[4702]: I1124 17:54:22.483353 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:54:52 crc kubenswrapper[4702]: I1124 17:54:52.482987 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
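[Editor's note] The recurring patch_prober/prober entries here and below are the kubelet running an HTTP liveness probe against the machine-config-daemon on 127.0.0.1:8798 and getting connection refused, with failures arriving about 30 seconds apart. The daemon's manifest is not part of this log, but a probe of roughly this shape would produce exactly those GETs (all field values are assumptions):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1", // loopback target, consistent with a hostNetwork daemonset; assumption
				Port: intstr.FromInt(8798),
				Path: "/health",
			},
		},
		PeriodSeconds:    30, // consistent with the ~30s spacing of the failures in the log
		FailureThreshold: 3,  // assumed; the log alone does not reveal the threshold
	}
	fmt.Printf("GET http://%s:%d%s\n", probe.HTTPGet.Host, probe.HTTPGet.Port.IntValue(), probe.HTTPGet.Path)
}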
Nov 24 17:54:52 crc kubenswrapper[4702]: I1124 17:54:52.483551 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.572497 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hmmhz"]
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.575245 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.588404 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hmmhz"]
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768189 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768243 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-trusted-ca\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768272 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/01030177-fd89-4df8-ac4c-1098ec0fb4b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768288 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-certificates\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768307 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/01030177-fd89-4df8-ac4c-1098ec0fb4b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768329 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl5jw\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-kube-api-access-tl5jw\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768370 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-tls\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.768392 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-bound-sa-token\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.793737 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.869954 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-certificates\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870266 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/01030177-fd89-4df8-ac4c-1098ec0fb4b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870354 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/01030177-fd89-4df8-ac4c-1098ec0fb4b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870448 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5jw\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-kube-api-access-tl5jw\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870547 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-tls\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870617 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-bound-sa-token\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870700 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-trusted-ca\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.870716 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/01030177-fd89-4df8-ac4c-1098ec0fb4b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.871295 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-certificates\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.872067 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01030177-fd89-4df8-ac4c-1098ec0fb4b1-trusted-ca\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.879285 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/01030177-fd89-4df8-ac4c-1098ec0fb4b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.879392 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-registry-tls\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.886945 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-bound-sa-token\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.887182 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl5jw\" (UniqueName: \"kubernetes.io/projected/01030177-fd89-4df8-ac4c-1098ec0fb4b1-kube-api-access-tl5jw\") pod \"image-registry-66df7c8f76-hmmhz\" (UID: \"01030177-fd89-4df8-ac4c-1098ec0fb4b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:18 crc kubenswrapper[4702]: I1124 17:55:18.890175 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:19 crc kubenswrapper[4702]: I1124 17:55:19.051158 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hmmhz"]
Nov 24 17:55:19 crc kubenswrapper[4702]: I1124 17:55:19.958023 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz" event={"ID":"01030177-fd89-4df8-ac4c-1098ec0fb4b1","Type":"ContainerStarted","Data":"180565e91680a18f0f6d84104b7a1d70d5d0d09bc3cb137ac53e7259d764e36c"}
Nov 24 17:55:19 crc kubenswrapper[4702]: I1124 17:55:19.958943 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz" event={"ID":"01030177-fd89-4df8-ac4c-1098ec0fb4b1","Type":"ContainerStarted","Data":"d77b91f7772c94f8f2d026f6dd3ade8740093b5e9d43cba8a0a5634c2f18f2c3"}
Nov 24 17:55:19 crc kubenswrapper[4702]: I1124 17:55:19.958989 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz"
Nov 24 17:55:19 crc kubenswrapper[4702]: I1124 17:55:19.981512 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz" podStartSLOduration=1.9814923389999999 podStartE2EDuration="1.981492339s" podCreationTimestamp="2025-11-24 17:55:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:55:19.979516363 +0000 UTC m=+409.220257597" watchObservedRunningTime="2025-11-24 17:55:19.981492339 +0000 UTC m=+409.222233513"
Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.483711 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.484223 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.484297 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst"
Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.485166 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.485275 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3" gracePeriod=600
containerName="machine-config-daemon" containerID="cri-o://d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3" gracePeriod=600 Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.977261 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3" exitCode=0 Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.977582 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3"} Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.977613 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815"} Nov 24 17:55:22 crc kubenswrapper[4702]: I1124 17:55:22.977633 4702 scope.go:117] "RemoveContainer" containerID="086733c2f2e31737263a0d4602fa31f53ef705f28304f3facaf2e786819eae4f" Nov 24 17:55:38 crc kubenswrapper[4702]: I1124 17:55:38.897327 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-hmmhz" Nov 24 17:55:38 crc kubenswrapper[4702]: I1124 17:55:38.955330 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"] Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.011264 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" podUID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" containerName="registry" containerID="cri-o://7c2d4d04c88a3a10006770481abb0dca6ee2eef732006bb79c5ae6a0bb04cb1c" gracePeriod=30 Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.203118 4702 generic.go:334] "Generic (PLEG): container finished" podID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" containerID="7c2d4d04c88a3a10006770481abb0dca6ee2eef732006bb79c5ae6a0bb04cb1c" exitCode=0 Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.203201 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" event={"ID":"dcbd5701-dced-406b-8cf3-0366e6c0f95b","Type":"ContainerDied","Data":"7c2d4d04c88a3a10006770481abb0dca6ee2eef732006bb79c5ae6a0bb04cb1c"} Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.333597 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481398 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481475 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481528 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481579 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481603 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481639 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481766 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.481834 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rhb6\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6\") pod \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\" (UID: \"dcbd5701-dced-406b-8cf3-0366e6c0f95b\") " Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.482675 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.482776 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.487230 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.487490 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6" (OuterVolumeSpecName: "kube-api-access-8rhb6") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "kube-api-access-8rhb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.487687 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.487962 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.496624 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.498021 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "dcbd5701-dced-406b-8cf3-0366e6c0f95b" (UID: "dcbd5701-dced-406b-8cf3-0366e6c0f95b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582837 4702 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582881 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rhb6\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-kube-api-access-8rhb6\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582897 4702 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dcbd5701-dced-406b-8cf3-0366e6c0f95b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582908 4702 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582920 4702 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcbd5701-dced-406b-8cf3-0366e6c0f95b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582929 4702 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dcbd5701-dced-406b-8cf3-0366e6c0f95b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:04 crc kubenswrapper[4702]: I1124 17:56:04.582938 4702 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dcbd5701-dced-406b-8cf3-0366e6c0f95b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.209625 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5lncr" event={"ID":"dcbd5701-dced-406b-8cf3-0366e6c0f95b","Type":"ContainerDied","Data":"2b28386e8b6288edd9594787c640fc33b06c2d13217f211da415abaea90d92d6"} Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.209679 4702 scope.go:117] "RemoveContainer" containerID="7c2d4d04c88a3a10006770481abb0dca6ee2eef732006bb79c5ae6a0bb04cb1c" Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.209690 4702 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.237705 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"]
Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.241607 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5lncr"]
Nov 24 17:56:05 crc kubenswrapper[4702]: I1124 17:56:05.656116 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" path="/var/lib/kubelet/pods/dcbd5701-dced-406b-8cf3-0366e6c0f95b/volumes"
Nov 24 17:57:22 crc kubenswrapper[4702]: I1124 17:57:22.483398 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:57:22 crc kubenswrapper[4702]: I1124 17:57:22.483974 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:57:52 crc kubenswrapper[4702]: I1124 17:57:52.482689 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:57:52 crc kubenswrapper[4702]: I1124 17:57:52.483165 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.572825 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-f5g6n"]
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.573727 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-controller" containerID="cri-o://6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.573832 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="nbdb" containerID="cri-o://530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.573883 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.573931 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-node" containerID="cri-o://48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.573869 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="northd" containerID="cri-o://79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.574062 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="sbdb" containerID="cri-o://7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.574084 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-acl-logging" containerID="cri-o://3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.608648 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" containerID="cri-o://e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6" gracePeriod=30
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.882016 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/4.log"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.882928 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/3.log"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.884930 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-acl-logging/0.log"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885391 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-controller/0.log"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885861 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6" exitCode=2
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885885 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f" exitCode=0
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885893 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4" exitCode=0
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885902 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a" exitCode=0
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885908 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896" exitCode=0
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885915 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab" exitCode=143
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885925 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2" exitCode=143
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885942 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885977 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.885993 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.886008 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.886018 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.886029 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.886041 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2"}
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.886060 4702 scope.go:117] "RemoveContainer" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c"
Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.888525 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/2.log"
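[Editor's note] The exit codes in the PLEG events above follow the usual Unix convention: 0 and 2 are the processes' own exit statuses, while 143 is 128+15, meaning ovn-controller and ovn-acl-logging were still running when the kubelet's SIGTERM arrived and died from the signal. A one-liner to confirm the arithmetic:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fmt.Println(128 + int(syscall.SIGTERM)) // prints 143
}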
17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.888960 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/1.log" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.889008 4702 generic.go:334] "Generic (PLEG): container finished" podID="f4859751-212a-4d94-b0c7-875b1da99cd8" containerID="fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf" exitCode=2 Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.889036 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerDied","Data":"fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf"} Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.889492 4702 scope.go:117] "RemoveContainer" containerID="fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf" Nov 24 17:58:06 crc kubenswrapper[4702]: E1124 17:58:06.889813 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8g6cn_openshift-multus(f4859751-212a-4d94-b0c7-875b1da99cd8)\"" pod="openshift-multus/multus-8g6cn" podUID="f4859751-212a-4d94-b0c7-875b1da99cd8" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.947016 4702 scope.go:117] "RemoveContainer" containerID="d69835ec364313c74f5868e4b56e8d5b34c8b38d206fd0af705538eaa835ca3e" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.947302 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/4.log" Nov 24 17:58:06 crc kubenswrapper[4702]: E1124 17:58:06.947708 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c\": container with ID starting with 60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c not found: ID does not exist" containerID="60b6fc130b9dda618d40aa2089d5d79128c6db81f1e4141e9444ea765076029c" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.949693 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-acl-logging/0.log" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.950263 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-controller/0.log" Nov 24 17:58:06 crc kubenswrapper[4702]: I1124 17:58:06.950668 4702 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000617 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-88dpn"]
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000887 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="northd"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000911 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="northd"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000921 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="nbdb"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000930 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="nbdb"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000942 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="sbdb"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000951 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="sbdb"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000960 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000967 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000982 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.000989 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.000998 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001006 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001016 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-ovn-metrics"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001024 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-ovn-metrics"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001037 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" containerName="registry"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001045 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" containerName="registry"
Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001052 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-node"
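The cpu_manager/state_mem pairs above (and the memory_manager lines that follow) are stale-state cleanup: before admitting the replacement pod ovnkube-node-88dpn, the kubelet drops resource-manager bookkeeping still keyed to the deleted pod's UID. Roughly, and with invented types (not the kubelet's own):

package main

import "fmt"

// key identifies one container's resource assignment, as the log entries do
// with podUID + containerName.
type key struct{ podUID, container string }

// removeStaleState drops assignments whose pod no longer exists, which is
// what the repeated "Deleted CPUSet assignment" lines record. Illustrative.
func removeStaleState(assignments map[key]string, livePods map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !livePods[k.podUID] {
			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	a := map[key]string{
		{"0d4b86a8-9180-41ee-b240-0071bdc994da", "northd"}: "0-3",
		{"0d4b86a8-9180-41ee-b240-0071bdc994da", "nbdb"}:   "0-3",
	}
	removeStaleState(a, map[string]bool{}) // the old pod is gone
}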
containerName="kube-rbac-proxy-node" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001060 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-node" Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001072 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kubecfg-setup" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001081 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kubecfg-setup" Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001091 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001100 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001109 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001117 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001130 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-acl-logging" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001138 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-acl-logging" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001243 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001259 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcbd5701-dced-406b-8cf3-0366e6c0f95b" containerName="registry" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001270 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001279 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovn-acl-logging" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001290 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="sbdb" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001301 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001308 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-node" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001317 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001326 4702 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="northd" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001335 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001348 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="nbdb" Nov 24 17:58:07 crc kubenswrapper[4702]: E1124 17:58:07.001457 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001468 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001564 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.001584 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerName="ovnkube-controller" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.003479 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079239 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079290 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079316 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079350 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079384 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079368 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod 
"0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079370 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079407 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079420 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wkcd\" (UniqueName: \"kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079462 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log" (OuterVolumeSpecName: "node-log") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079516 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079497 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079548 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079576 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079597 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079611 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079632 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079666 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079683 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079767 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079846 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash" (OuterVolumeSpecName: "host-slash") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079899 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079905 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket" (OuterVolumeSpecName: "log-socket") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079872 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079865 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079944 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079963 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079965 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.079989 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080016 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080030 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch\") pod \"0d4b86a8-9180-41ee-b240-0071bdc994da\" (UID: \"0d4b86a8-9180-41ee-b240-0071bdc994da\") " Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080049 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080070 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080088 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). 
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080104 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080185 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080543 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080645 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-node-log\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080689 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-netd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080711 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-config\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080731 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0abc453-96ef-477d-bea0-f7725fa48d20-ovn-node-metrics-cert\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080753 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080768 4702 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-kubelet\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080785 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-systemd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080876 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-etc-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080916 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-slash\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080939 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080968 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-systemd-units\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.080991 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-env-overrides\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081037 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-netns\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081064 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-ovn\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081150 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwdqq\" (UniqueName: \"kubernetes.io/projected/b0abc453-96ef-477d-bea0-f7725fa48d20-kube-api-access-dwdqq\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081184 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081211 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-script-lib\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081239 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-bin\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081262 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-var-lib-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081295 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-log-socket\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081363 4702 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081379 4702 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081391 4702 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-node-log\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081402 4702 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-ovn-kubernetes\") on node \"crc\" 
DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081414 4702 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081426 4702 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081438 4702 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081450 4702 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081462 4702 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-slash\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081475 4702 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081486 4702 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-log-socket\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081499 4702 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081510 4702 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081521 4702 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4b86a8-9180-41ee-b240-0071bdc994da-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081535 4702 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081548 4702 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.081560 4702 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 
crc kubenswrapper[4702]: I1124 17:58:07.084831 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd" (OuterVolumeSpecName: "kube-api-access-6wkcd") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "kube-api-access-6wkcd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.084875 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.091785 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "0d4b86a8-9180-41ee-b240-0071bdc994da" (UID: "0d4b86a8-9180-41ee-b240-0071bdc994da"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182042 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182102 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-script-lib\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182128 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-bin\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182150 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-var-lib-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182176 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-log-socket\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182185 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
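The mount side mirrors the teardown: each of the new pod's volumes goes through VerifyControllerAttachedVolume and then MountVolume.SetUp. For host-path volumes SetUp has essentially nothing to create, while configmap/secret/projected volumes are materialized as files, which is plausibly why ovn-node-metrics-cert and kube-api-access-dwdqq complete last below. A schematic sketch with simplified, hypothetical types:

package main

import "fmt"

// volume pairs a name with its plugin, as in the UniqueName fields above.
type volume struct{ name, plugin string }

// mount sketches the per-volume pipeline the log records; it is not the
// kubelet's operation executor.
func mount(v volume) {
	fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.name)
	switch v.plugin {
	case "kubernetes.io/host-path":
		// nothing to create: the directory already exists on the node
	case "kubernetes.io/configmap", "kubernetes.io/secret", "kubernetes.io/projected":
		// write the data keys as files into the pod's volume directory (elided)
	}
	fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
}

func main() {
	for _, v := range []volume{
		{"run-openvswitch", "kubernetes.io/host-path"},
		{"ovnkube-config", "kubernetes.io/configmap"},
		{"ovn-node-metrics-cert", "kubernetes.io/secret"},
	} {
		mount(v)
	}
}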
\"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182205 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-node-log\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182234 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-node-log\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182236 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-netd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182252 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-netd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182268 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-config\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182277 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-var-lib-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182286 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0abc453-96ef-477d-bea0-f7725fa48d20-ovn-node-metrics-cert\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182303 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182315 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-log-socket\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc 
kubenswrapper[4702]: I1124 17:58:07.182293 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-cni-bin\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182362 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182343 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-kubelet\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182321 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-kubelet\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182426 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-systemd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182469 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-etc-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182488 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-slash\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182506 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182528 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-systemd-units\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182554 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-env-overrides\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182582 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-netns\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182598 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-ovn\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182669 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwdqq\" (UniqueName: \"kubernetes.io/projected/b0abc453-96ef-477d-bea0-f7725fa48d20-kube-api-access-dwdqq\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182715 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182726 4702 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4b86a8-9180-41ee-b240-0071bdc994da-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182754 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wkcd\" (UniqueName: \"kubernetes.io/projected/0d4b86a8-9180-41ee-b240-0071bdc994da-kube-api-access-6wkcd\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182772 4702 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4b86a8-9180-41ee-b240-0071bdc994da-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182817 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-systemd\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182848 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-etc-openvswitch\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182933 4702 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-config\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182981 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-systemd-units\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.182988 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-run-netns\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.183036 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-run-ovn\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.183133 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0abc453-96ef-477d-bea0-f7725fa48d20-host-slash\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.183147 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-ovnkube-script-lib\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.183262 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0abc453-96ef-477d-bea0-f7725fa48d20-env-overrides\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.185337 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0abc453-96ef-477d-bea0-f7725fa48d20-ovn-node-metrics-cert\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.200054 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwdqq\" (UniqueName: \"kubernetes.io/projected/b0abc453-96ef-477d-bea0-f7725fa48d20-kube-api-access-dwdqq\") pod \"ovnkube-node-88dpn\" (UID: \"b0abc453-96ef-477d-bea0-f7725fa48d20\") " pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.322593 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn"
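With the volumes in place, the sandbox starts and the entries below show container d0c4aae… exiting 0: that is kubecfg-setup, an init container. Each init container must run to completion before the next phase, and only then do the pod's long-running containers start. Schematically (illustrative names, not kubelet code):

package main

import "fmt"

// startPod sketches the ordering the log records: sandbox first, then init
// containers sequentially (each must exit 0), then the regular containers.
func startPod(sandbox string, initContainers, containers []string, run func(string) int) error {
	fmt.Println("ContainerStarted:", sandbox)
	for _, c := range initContainers {
		if code := run(c); code != 0 {
			return fmt.Errorf("init container %s exited %d", c, code)
		}
		fmt.Println("ContainerDied (exitCode=0):", c)
	}
	for _, c := range containers {
		run(c)
		fmt.Println("ContainerStarted:", c)
	}
	return nil
}

func main() {
	run := func(string) int { return 0 }
	_ = startPod("sandbox", []string{"kubecfg-setup"}, []string{"ovn-controller", "ovnkube-controller"}, run)
}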
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.895014 4702 generic.go:334] "Generic (PLEG): container finished" podID="b0abc453-96ef-477d-bea0-f7725fa48d20" containerID="d0c4aae365009a42ce303027869dbc6118c0a60fbd6e62b98b0d7625f33f3775" exitCode=0
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.895095 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerDied","Data":"d0c4aae365009a42ce303027869dbc6118c0a60fbd6e62b98b0d7625f33f3775"}
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.895358 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"50054a5a97a1bdbf95af646718511204d8062daa631543cf3fe9e7d1506b0a84"}
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.898751 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovnkube-controller/4.log"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.902069 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-acl-logging/0.log"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.902617 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-f5g6n_0d4b86a8-9180-41ee-b240-0071bdc994da/ovn-controller/0.log"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.902996 4702 generic.go:334] "Generic (PLEG): container finished" podID="0d4b86a8-9180-41ee-b240-0071bdc994da" containerID="79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d" exitCode=0
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.903073 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d"}
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.903101 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" event={"ID":"0d4b86a8-9180-41ee-b240-0071bdc994da","Type":"ContainerDied","Data":"51e341ebc0666948185e993ba5f1faaab024c08cd1cc3ba2c47a14b6bf087568"}
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.903132 4702 scope.go:117] "RemoveContainer" containerID="e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6"
Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.903151 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-f5g6n" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.905139 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/2.log" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.918972 4702 scope.go:117] "RemoveContainer" containerID="7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.944528 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-f5g6n"] Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.947096 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-f5g6n"] Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.954379 4702 scope.go:117] "RemoveContainer" containerID="530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.976066 4702 scope.go:117] "RemoveContainer" containerID="79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d" Nov 24 17:58:07 crc kubenswrapper[4702]: I1124 17:58:07.999259 4702 scope.go:117] "RemoveContainer" containerID="4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.011681 4702 scope.go:117] "RemoveContainer" containerID="48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.023081 4702 scope.go:117] "RemoveContainer" containerID="3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.037262 4702 scope.go:117] "RemoveContainer" containerID="6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.050868 4702 scope.go:117] "RemoveContainer" containerID="7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.074677 4702 scope.go:117] "RemoveContainer" containerID="e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.075191 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6\": container with ID starting with e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6 not found: ID does not exist" containerID="e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.075221 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6"} err="failed to get container status \"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6\": rpc error: code = NotFound desc = could not find container \"e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6\": container with ID starting with e88ef83eaa32b1fec691a573631b6b8d114705b5bf11e5d542a6c0d3fdbb45a6 not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.075240 4702 scope.go:117] "RemoveContainer" containerID="7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.075611 4702 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\": container with ID starting with 7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f not found: ID does not exist" containerID="7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.075654 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f"} err="failed to get container status \"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\": rpc error: code = NotFound desc = could not find container \"7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f\": container with ID starting with 7ce358a698433542141f2fb198eb2de40c7843d6856b732c6907d5fdc5f1920f not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.075683 4702 scope.go:117] "RemoveContainer" containerID="530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.076012 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\": container with ID starting with 530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4 not found: ID does not exist" containerID="530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076040 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4"} err="failed to get container status \"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\": rpc error: code = NotFound desc = could not find container \"530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4\": container with ID starting with 530b3e20f2c83bf7d91310e1f97d2fdf0f60e3f34d4a5051bd6ea11f387e37b4 not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076053 4702 scope.go:117] "RemoveContainer" containerID="79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.076308 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\": container with ID starting with 79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d not found: ID does not exist" containerID="79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076330 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d"} err="failed to get container status \"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\": rpc error: code = NotFound desc = could not find container \"79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d\": container with ID starting with 79a66fc7911e56575fd83e3f04c6b60a3b5030076624527c311e08d0a470533d not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076344 4702 scope.go:117] "RemoveContainer" 
containerID="4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.076621 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\": container with ID starting with 4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a not found: ID does not exist" containerID="4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076669 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a"} err="failed to get container status \"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\": rpc error: code = NotFound desc = could not find container \"4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a\": container with ID starting with 4034460017a6f2b03351cfb7ec6aeb1e9063f07e5a955940ca437739b5053b6a not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.076688 4702 scope.go:117] "RemoveContainer" containerID="48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.077083 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\": container with ID starting with 48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896 not found: ID does not exist" containerID="48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.077125 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896"} err="failed to get container status \"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\": rpc error: code = NotFound desc = could not find container \"48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896\": container with ID starting with 48195a7df881979155bd316faaa9952b90071f735a06271be7c39a66952f0896 not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.077140 4702 scope.go:117] "RemoveContainer" containerID="3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.077552 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\": container with ID starting with 3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab not found: ID does not exist" containerID="3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.077599 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab"} err="failed to get container status \"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\": rpc error: code = NotFound desc = could not find container \"3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab\": container with ID starting with 
3462dae1a7ee15e8c437509e1d0ae00861d1ee903d2edc9e49afb899862437ab not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.077616 4702 scope.go:117] "RemoveContainer" containerID="6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.078019 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\": container with ID starting with 6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2 not found: ID does not exist" containerID="6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.078043 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2"} err="failed to get container status \"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\": rpc error: code = NotFound desc = could not find container \"6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2\": container with ID starting with 6c18fe9cce9eabadbd457f05adac8b76b47cde87043ea4f98fbf8eac10ca7ce2 not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.078059 4702 scope.go:117] "RemoveContainer" containerID="7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742" Nov 24 17:58:08 crc kubenswrapper[4702]: E1124 17:58:08.078336 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\": container with ID starting with 7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742 not found: ID does not exist" containerID="7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.078522 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742"} err="failed to get container status \"7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\": rpc error: code = NotFound desc = could not find container \"7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742\": container with ID starting with 7930fe0d637c829a5695cd67a53d6a528e5ba13dc301055427275084d7bc3742 not found: ID does not exist" Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.914074 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"97431a5d78105236fd90760c0c16d19b2cb3823bc64c4d7e89937203cbe04ce6"} Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.914366 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"f9f3b48b49d4a43bd375c54dceaaee05c8ff636c74f0938a58f8859da82a3608"} Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.914377 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"69a6d59599ecdbefef7b35bc3fec40572cb70f4e9e5f3925d0ea22d5afb43aed"} Nov 24 17:58:08 crc 
kubenswrapper[4702]: I1124 17:58:08.914385 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"0d97828a10ed883ff7eb07a30d642ec13cc7e563de8b3d2f5306fcfe5160bb91"} Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.914392 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"02dfb004aaca9f8df294379e3d6e368a85d427afc5ba876db2d1c9efe7139073"} Nov 24 17:58:08 crc kubenswrapper[4702]: I1124 17:58:08.914401 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"303e84f2367658f415f4709a2d0a0219d977382a557d54dd65eca18165769786"} Nov 24 17:58:09 crc kubenswrapper[4702]: I1124 17:58:09.659873 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d4b86a8-9180-41ee-b240-0071bdc994da" path="/var/lib/kubelet/pods/0d4b86a8-9180-41ee-b240-0071bdc994da/volumes" Nov 24 17:58:10 crc kubenswrapper[4702]: I1124 17:58:10.929785 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"c5653ae49cf70619ddf1fb5439d3cbfc36422b3544cd8b4173a81b99a02a234e"} Nov 24 17:58:13 crc kubenswrapper[4702]: I1124 17:58:13.951149 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" event={"ID":"b0abc453-96ef-477d-bea0-f7725fa48d20","Type":"ContainerStarted","Data":"63b951435ef64fe055f5d202bd4f15034d26f7e0520c46c2103a6a3b288312cb"} Nov 24 17:58:13 crc kubenswrapper[4702]: I1124 17:58:13.951858 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:13 crc kubenswrapper[4702]: I1124 17:58:13.991353 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:14 crc kubenswrapper[4702]: I1124 17:58:14.001849 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" podStartSLOduration=8.001828115 podStartE2EDuration="8.001828115s" podCreationTimestamp="2025-11-24 17:58:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:58:13.999457768 +0000 UTC m=+583.240198962" watchObservedRunningTime="2025-11-24 17:58:14.001828115 +0000 UTC m=+583.242569329" Nov 24 17:58:14 crc kubenswrapper[4702]: I1124 17:58:14.955825 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:14 crc kubenswrapper[4702]: I1124 17:58:14.955886 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:14 crc kubenswrapper[4702]: I1124 17:58:14.982085 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:20 crc kubenswrapper[4702]: I1124 17:58:20.648265 4702 scope.go:117] "RemoveContainer" containerID="fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf" Nov 24 17:58:20 crc kubenswrapper[4702]: E1124 
17:58:20.649559 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8g6cn_openshift-multus(f4859751-212a-4d94-b0c7-875b1da99cd8)\"" pod="openshift-multus/multus-8g6cn" podUID="f4859751-212a-4d94-b0c7-875b1da99cd8" Nov 24 17:58:22 crc kubenswrapper[4702]: I1124 17:58:22.482937 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:58:22 crc kubenswrapper[4702]: I1124 17:58:22.483283 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:58:22 crc kubenswrapper[4702]: I1124 17:58:22.483332 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 17:58:22 crc kubenswrapper[4702]: I1124 17:58:22.483882 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:58:22 crc kubenswrapper[4702]: I1124 17:58:22.483948 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815" gracePeriod=600 Nov 24 17:58:23 crc kubenswrapper[4702]: I1124 17:58:23.001023 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815" exitCode=0 Nov 24 17:58:23 crc kubenswrapper[4702]: I1124 17:58:23.001105 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815"} Nov 24 17:58:23 crc kubenswrapper[4702]: I1124 17:58:23.001500 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b"} Nov 24 17:58:23 crc kubenswrapper[4702]: I1124 17:58:23.001522 4702 scope.go:117] "RemoveContainer" containerID="d5b90336aa6316dac29575f9b256dd04f79381722d37dff677c87e96b2991ef3" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.870300 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7"] Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.872010 4702 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.873777 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.876965 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxjmp\" (UniqueName: \"kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.877094 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.877173 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.880031 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7"] Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.978071 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxjmp\" (UniqueName: \"kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.978562 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.978600 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.979160 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.979235 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:30 crc kubenswrapper[4702]: I1124 17:58:30.996020 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxjmp\" (UniqueName: \"kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:31 crc kubenswrapper[4702]: I1124 17:58:31.186900 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:31 crc kubenswrapper[4702]: E1124 17:58:31.223534 4702 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(8fe64ee2b740adfb8576da06c3c75f59906fc1b314af83ac1e4fd606b2f0eb67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:58:31 crc kubenswrapper[4702]: E1124 17:58:31.223666 4702 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(8fe64ee2b740adfb8576da06c3c75f59906fc1b314af83ac1e4fd606b2f0eb67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:31 crc kubenswrapper[4702]: E1124 17:58:31.223718 4702 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(8fe64ee2b740adfb8576da06c3c75f59906fc1b314af83ac1e4fd606b2f0eb67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:31 crc kubenswrapper[4702]: E1124 17:58:31.223868 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace(4b8235d7-a978-4f3a-ae4e-2bd3a856deac)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace(4b8235d7-a978-4f3a-ae4e-2bd3a856deac)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(8fe64ee2b740adfb8576da06c3c75f59906fc1b314af83ac1e4fd606b2f0eb67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" Nov 24 17:58:32 crc kubenswrapper[4702]: I1124 17:58:32.053962 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:32 crc kubenswrapper[4702]: I1124 17:58:32.055075 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:32 crc kubenswrapper[4702]: E1124 17:58:32.084780 4702 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(7d47929d9245148f098d293f42694c0f89e69d085df8fb28c342f58a49082b44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:58:32 crc kubenswrapper[4702]: E1124 17:58:32.084912 4702 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(7d47929d9245148f098d293f42694c0f89e69d085df8fb28c342f58a49082b44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:32 crc kubenswrapper[4702]: E1124 17:58:32.084953 4702 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(7d47929d9245148f098d293f42694c0f89e69d085df8fb28c342f58a49082b44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:32 crc kubenswrapper[4702]: E1124 17:58:32.085038 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace(4b8235d7-a978-4f3a-ae4e-2bd3a856deac)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace(4b8235d7-a978-4f3a-ae4e-2bd3a856deac)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_openshift-marketplace_4b8235d7-a978-4f3a-ae4e-2bd3a856deac_0(7d47929d9245148f098d293f42694c0f89e69d085df8fb28c342f58a49082b44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" Nov 24 17:58:33 crc kubenswrapper[4702]: I1124 17:58:33.648148 4702 scope.go:117] "RemoveContainer" containerID="fcd5c329e31faa544fa9f1cfb6487cbc9a3da721cc43c9c2dd82266ec8ac62cf" Nov 24 17:58:34 crc kubenswrapper[4702]: I1124 17:58:34.065913 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8g6cn_f4859751-212a-4d94-b0c7-875b1da99cd8/kube-multus/2.log" Nov 24 17:58:34 crc kubenswrapper[4702]: I1124 17:58:34.066372 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8g6cn" event={"ID":"f4859751-212a-4d94-b0c7-875b1da99cd8","Type":"ContainerStarted","Data":"d0edbedae208b886c5031590bde6612636c393c6b642e56f95d24acb758e2838"} Nov 24 17:58:37 crc kubenswrapper[4702]: I1124 17:58:37.348712 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88dpn" Nov 24 17:58:46 crc kubenswrapper[4702]: I1124 17:58:46.647333 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:46 crc kubenswrapper[4702]: I1124 17:58:46.648036 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:46 crc kubenswrapper[4702]: I1124 17:58:46.818996 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7"] Nov 24 17:58:47 crc kubenswrapper[4702]: I1124 17:58:47.141615 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerStarted","Data":"4f22e251df98e90c4ff591897c57dcdd680f89a13b23e259d597eded94586b69"} Nov 24 17:58:47 crc kubenswrapper[4702]: I1124 17:58:47.141947 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerStarted","Data":"016454e38a325f0ac4f9c859340b553b2386d1197c3f6a0d504f7a0c12125b8a"} Nov 24 17:58:48 crc kubenswrapper[4702]: I1124 17:58:48.147546 4702 generic.go:334] "Generic (PLEG): container finished" podID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerID="4f22e251df98e90c4ff591897c57dcdd680f89a13b23e259d597eded94586b69" exitCode=0 Nov 24 17:58:48 crc kubenswrapper[4702]: I1124 17:58:48.147630 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerDied","Data":"4f22e251df98e90c4ff591897c57dcdd680f89a13b23e259d597eded94586b69"} Nov 24 17:58:48 crc kubenswrapper[4702]: I1124 17:58:48.150527 4702 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:58:50 crc kubenswrapper[4702]: I1124 17:58:50.163586 4702 generic.go:334] "Generic (PLEG): container finished" podID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerID="4d446ae391df105d48ca232d7d1a6d8cf36ec3fe52e3b490f090e2adf4fbfb53" exitCode=0 Nov 24 17:58:50 crc kubenswrapper[4702]: I1124 17:58:50.163631 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerDied","Data":"4d446ae391df105d48ca232d7d1a6d8cf36ec3fe52e3b490f090e2adf4fbfb53"} Nov 24 17:58:51 crc kubenswrapper[4702]: I1124 17:58:51.174942 4702 generic.go:334] "Generic (PLEG): container finished" podID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerID="d89a435d4d6bb576e0cbf3061df1f65eedfc14e9dd81819dbe67d4834b013f43" exitCode=0 Nov 24 17:58:51 crc kubenswrapper[4702]: I1124 17:58:51.175007 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerDied","Data":"d89a435d4d6bb576e0cbf3061df1f65eedfc14e9dd81819dbe67d4834b013f43"} Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.394886 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.445425 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util\") pod \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.445494 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxjmp\" (UniqueName: \"kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp\") pod \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.445601 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle\") pod \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\" (UID: \"4b8235d7-a978-4f3a-ae4e-2bd3a856deac\") " Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.447119 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle" (OuterVolumeSpecName: "bundle") pod "4b8235d7-a978-4f3a-ae4e-2bd3a856deac" (UID: "4b8235d7-a978-4f3a-ae4e-2bd3a856deac"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.451291 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp" (OuterVolumeSpecName: "kube-api-access-zxjmp") pod "4b8235d7-a978-4f3a-ae4e-2bd3a856deac" (UID: "4b8235d7-a978-4f3a-ae4e-2bd3a856deac"). InnerVolumeSpecName "kube-api-access-zxjmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.547077 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxjmp\" (UniqueName: \"kubernetes.io/projected/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-kube-api-access-zxjmp\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.547119 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.806270 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util" (OuterVolumeSpecName: "util") pod "4b8235d7-a978-4f3a-ae4e-2bd3a856deac" (UID: "4b8235d7-a978-4f3a-ae4e-2bd3a856deac"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:58:52 crc kubenswrapper[4702]: I1124 17:58:52.851019 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4b8235d7-a978-4f3a-ae4e-2bd3a856deac-util\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:53 crc kubenswrapper[4702]: I1124 17:58:53.189600 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" event={"ID":"4b8235d7-a978-4f3a-ae4e-2bd3a856deac","Type":"ContainerDied","Data":"016454e38a325f0ac4f9c859340b553b2386d1197c3f6a0d504f7a0c12125b8a"} Nov 24 17:58:53 crc kubenswrapper[4702]: I1124 17:58:53.189656 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="016454e38a325f0ac4f9c859340b553b2386d1197c3f6a0d504f7a0c12125b8a" Nov 24 17:58:53 crc kubenswrapper[4702]: I1124 17:58:53.189882 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.136672 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5f77795d-q8krl"] Nov 24 17:59:04 crc kubenswrapper[4702]: E1124 17:59:04.137235 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="pull" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.137247 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="pull" Nov 24 17:59:04 crc kubenswrapper[4702]: E1124 17:59:04.137262 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="util" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.137269 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="util" Nov 24 17:59:04 crc kubenswrapper[4702]: E1124 17:59:04.137278 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="extract" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.137284 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="extract" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.137374 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b8235d7-a978-4f3a-ae4e-2bd3a856deac" containerName="extract" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.137726 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.140220 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.140348 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.140575 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.140886 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.140956 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-ssgp7" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.152306 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5f77795d-q8krl"] Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.276850 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-webhook-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.276930 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqszv\" (UniqueName: \"kubernetes.io/projected/80458bb6-ef17-4651-a20d-0e9d9b7659a3-kube-api-access-cqszv\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.276961 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-apiservice-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.377996 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-apiservice-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.378080 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-webhook-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.378121 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cqszv\" (UniqueName: \"kubernetes.io/projected/80458bb6-ef17-4651-a20d-0e9d9b7659a3-kube-api-access-cqszv\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.386839 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-webhook-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.390096 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/80458bb6-ef17-4651-a20d-0e9d9b7659a3-apiservice-cert\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.394336 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqszv\" (UniqueName: \"kubernetes.io/projected/80458bb6-ef17-4651-a20d-0e9d9b7659a3-kube-api-access-cqszv\") pod \"metallb-operator-controller-manager-5f77795d-q8krl\" (UID: \"80458bb6-ef17-4651-a20d-0e9d9b7659a3\") " pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.452881 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.459549 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l"] Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.460925 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.467209 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.467327 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-jvrpq" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.467732 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.520421 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l"] Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.590682 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-webhook-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.590734 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6k7k\" (UniqueName: \"kubernetes.io/projected/c726fa9b-4677-4c40-8ebd-242f517a6375-kube-api-access-l6k7k\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.590761 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-apiservice-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.647485 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5f77795d-q8krl"] Nov 24 17:59:04 crc kubenswrapper[4702]: W1124 17:59:04.655149 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80458bb6_ef17_4651_a20d_0e9d9b7659a3.slice/crio-0dd117c7155e6f61d262a8712981eed1b2d295999518c6dbc44362ad2b10dc16 WatchSource:0}: Error finding container 0dd117c7155e6f61d262a8712981eed1b2d295999518c6dbc44362ad2b10dc16: Status 404 returned error can't find the container with id 0dd117c7155e6f61d262a8712981eed1b2d295999518c6dbc44362ad2b10dc16 Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.691698 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-webhook-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.691951 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6k7k\" (UniqueName: 
\"kubernetes.io/projected/c726fa9b-4677-4c40-8ebd-242f517a6375-kube-api-access-l6k7k\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.691993 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-apiservice-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.696306 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-webhook-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.696901 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c726fa9b-4677-4c40-8ebd-242f517a6375-apiservice-cert\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.707465 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6k7k\" (UniqueName: \"kubernetes.io/projected/c726fa9b-4677-4c40-8ebd-242f517a6375-kube-api-access-l6k7k\") pod \"metallb-operator-webhook-server-9fc6b78-vsd8l\" (UID: \"c726fa9b-4677-4c40-8ebd-242f517a6375\") " pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:04 crc kubenswrapper[4702]: I1124 17:59:04.812180 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:05 crc kubenswrapper[4702]: I1124 17:59:05.018672 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l"] Nov 24 17:59:05 crc kubenswrapper[4702]: I1124 17:59:05.258163 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" event={"ID":"80458bb6-ef17-4651-a20d-0e9d9b7659a3","Type":"ContainerStarted","Data":"0dd117c7155e6f61d262a8712981eed1b2d295999518c6dbc44362ad2b10dc16"} Nov 24 17:59:05 crc kubenswrapper[4702]: I1124 17:59:05.260317 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" event={"ID":"c726fa9b-4677-4c40-8ebd-242f517a6375","Type":"ContainerStarted","Data":"0d8999ac77b09522adee45909c599c19888c1084a79549ace6eb7c890ef5bfbe"} Nov 24 17:59:07 crc kubenswrapper[4702]: I1124 17:59:07.275552 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" event={"ID":"80458bb6-ef17-4651-a20d-0e9d9b7659a3","Type":"ContainerStarted","Data":"4f639d76707d95ae59cc1710fac9dfe466c3d053823e6322a94a90d2a2dbe26a"} Nov 24 17:59:07 crc kubenswrapper[4702]: I1124 17:59:07.275954 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" Nov 24 17:59:07 crc kubenswrapper[4702]: I1124 17:59:07.295595 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl" podStartSLOduration=0.974432617 podStartE2EDuration="3.295578291s" podCreationTimestamp="2025-11-24 17:59:04 +0000 UTC" firstStartedPulling="2025-11-24 17:59:04.660388438 +0000 UTC m=+633.901129602" lastFinishedPulling="2025-11-24 17:59:06.981534112 +0000 UTC m=+636.222275276" observedRunningTime="2025-11-24 17:59:07.294203647 +0000 UTC m=+636.534944831" watchObservedRunningTime="2025-11-24 17:59:07.295578291 +0000 UTC m=+636.536319456" Nov 24 17:59:09 crc kubenswrapper[4702]: I1124 17:59:09.286468 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" event={"ID":"c726fa9b-4677-4c40-8ebd-242f517a6375","Type":"ContainerStarted","Data":"525be2c6035f9b5132c9967c5f96d905885fc94d33128f83a0f9e109a161ed61"} Nov 24 17:59:09 crc kubenswrapper[4702]: I1124 17:59:09.287006 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 17:59:09 crc kubenswrapper[4702]: I1124 17:59:09.305544 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" podStartSLOduration=1.7690711430000001 podStartE2EDuration="5.305531767s" podCreationTimestamp="2025-11-24 17:59:04 +0000 UTC" firstStartedPulling="2025-11-24 17:59:05.024972433 +0000 UTC m=+634.265713597" lastFinishedPulling="2025-11-24 17:59:08.561433057 +0000 UTC m=+637.802174221" observedRunningTime="2025-11-24 17:59:09.303425515 +0000 UTC m=+638.544166699" watchObservedRunningTime="2025-11-24 17:59:09.305531767 +0000 UTC m=+638.546272931" Nov 24 17:59:24 crc kubenswrapper[4702]: I1124 17:59:24.821473 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-9fc6b78-vsd8l" Nov 24 
Nov 24 17:59:44 crc kubenswrapper[4702]: I1124 17:59:44.455620 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5f77795d-q8krl"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.135467 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-lr7vm"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.137493 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.139460 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-startup\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.139642 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.139760 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.139898 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm9mn\" (UniqueName: \"kubernetes.io/projected/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-kube-api-access-fm9mn\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.140019 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-sockets\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.140139 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-reloader\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.140235 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-conf\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.140698 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-rzfqw"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.140923 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.141694 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.157033 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.157885 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.159961 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.170281 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.233103 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-2xjjq"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.234145 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.236654 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.237883 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.237903 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.238614 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-f6scd"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241241 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm9mn\" (UniqueName: \"kubernetes.io/projected/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-kube-api-access-fm9mn\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241396 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-6m56b"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241405 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-sockets\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241620 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metallb-excludel2\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241661 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69w8x\" (UniqueName: \"kubernetes.io/projected/457d1440-b56e-496a-82a1-89d661eadc8e-kube-api-access-69w8x\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241697 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqg8s\" (UniqueName: \"kubernetes.io/projected/e50e827d-eb1f-4401-80ca-c1b59cb02e75-kube-api-access-qqg8s\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241735 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-reloader\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241772 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-conf\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241857 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-startup\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241899 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/457d1440-b56e-496a-82a1-89d661eadc8e-cert\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241934 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241956 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.241984 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.242049 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.242369 4702 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.242419 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs podName:9cba0ae5-b6cc-4ade-b903-c7b28bbaf372 nodeName:}" failed. No retries permitted until 2025-11-24 17:59:45.742402336 +0000 UTC m=+674.983143500 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs") pod "frr-k8s-lr7vm" (UID: "9cba0ae5-b6cc-4ade-b903-c7b28bbaf372") : secret "frr-k8s-certs-secret" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.242424 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.243033 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-reloader\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.243405 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-conf\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.243610 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-sockets\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.243842 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.244124 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-frr-startup\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.248526 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.256474 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-6m56b"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.266601 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm9mn\" (UniqueName: \"kubernetes.io/projected/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-kube-api-access-fm9mn\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343427 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-metrics-certs\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343474 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/457d1440-b56e-496a-82a1-89d661eadc8e-cert\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343500 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343520 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343535 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-cert\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343586 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metallb-excludel2\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343606 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69w8x\" (UniqueName: \"kubernetes.io/projected/457d1440-b56e-496a-82a1-89d661eadc8e-kube-api-access-69w8x\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343630 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqg8s\" (UniqueName: \"kubernetes.io/projected/e50e827d-eb1f-4401-80ca-c1b59cb02e75-kube-api-access-qqg8s\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.343636 4702 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.343646 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqfz7\" (UniqueName: \"kubernetes.io/projected/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-kube-api-access-nqfz7\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.343650 4702 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.343685 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs podName:e50e827d-eb1f-4401-80ca-c1b59cb02e75 nodeName:}" failed. No retries permitted until 2025-11-24 17:59:45.843668678 +0000 UTC m=+675.084409842 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs") pod "speaker-2xjjq" (UID: "e50e827d-eb1f-4401-80ca-c1b59cb02e75") : secret "speaker-certs-secret" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.343724 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist podName:e50e827d-eb1f-4401-80ca-c1b59cb02e75 nodeName:}" failed. No retries permitted until 2025-11-24 17:59:45.843701149 +0000 UTC m=+675.084442313 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist") pod "speaker-2xjjq" (UID: "e50e827d-eb1f-4401-80ca-c1b59cb02e75") : secret "metallb-memberlist" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.344299 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metallb-excludel2\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.346903 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/457d1440-b56e-496a-82a1-89d661eadc8e-cert\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.361400 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqg8s\" (UniqueName: \"kubernetes.io/projected/e50e827d-eb1f-4401-80ca-c1b59cb02e75-kube-api-access-qqg8s\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.364450 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69w8x\" (UniqueName: \"kubernetes.io/projected/457d1440-b56e-496a-82a1-89d661eadc8e-kube-api-access-69w8x\") pod \"frr-k8s-webhook-server-6998585d5-snxq2\" (UID: \"457d1440-b56e-496a-82a1-89d661eadc8e\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.446478 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-metrics-certs\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.446609 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-cert\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.446692 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqfz7\" (UniqueName: \"kubernetes.io/projected/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-kube-api-access-nqfz7\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.448426 4702 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.449573 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-metrics-certs\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.461657 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-cert\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.474651 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqfz7\" (UniqueName: \"kubernetes.io/projected/a5e78bfd-acd0-4668-9bc1-7e2f91859d00-kube-api-access-nqfz7\") pod \"controller-6c7b4b5f48-6m56b\" (UID: \"a5e78bfd-acd0-4668-9bc1-7e2f91859d00\") " pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.480578 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.566479 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.660116 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"]
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.733619 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-6m56b"]
Nov 24 17:59:45 crc kubenswrapper[4702]: W1124 17:59:45.736916 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5e78bfd_acd0_4668_9bc1_7e2f91859d00.slice/crio-02e93456590bad663e999abbbfc064e244af179599b55eafc1796c073911c873 WatchSource:0}: Error finding container 02e93456590bad663e999abbbfc064e244af179599b55eafc1796c073911c873: Status 404 returned error can't find the container with id 02e93456590bad663e999abbbfc064e244af179599b55eafc1796c073911c873
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.752221 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.757948 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cba0ae5-b6cc-4ade-b903-c7b28bbaf372-metrics-certs\") pod \"frr-k8s-lr7vm\" (UID: \"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372\") " pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.853856 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.853908 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.853990 4702 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: E1124 17:59:45.854101 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist podName:e50e827d-eb1f-4401-80ca-c1b59cb02e75 nodeName:}" failed. No retries permitted until 2025-11-24 17:59:46.854082658 +0000 UTC m=+676.094823822 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist") pod "speaker-2xjjq" (UID: "e50e827d-eb1f-4401-80ca-c1b59cb02e75") : secret "metallb-memberlist" not found
Nov 24 17:59:45 crc kubenswrapper[4702]: I1124 17:59:45.858021 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-metrics-certs\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.055864 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.460782 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6m56b" event={"ID":"a5e78bfd-acd0-4668-9bc1-7e2f91859d00","Type":"ContainerStarted","Data":"c090aa277d7e86483e9341d9838fabb2e7e8e448d03b53653126efd64d5cbc36"}
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.460835 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6m56b" event={"ID":"a5e78bfd-acd0-4668-9bc1-7e2f91859d00","Type":"ContainerStarted","Data":"02e93456590bad663e999abbbfc064e244af179599b55eafc1796c073911c873"}
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.461759 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"6c56664e0f96c8caf1ff335cf6b3077107ff8f52463f12f2269def54aa736391"}
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.462533 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2" event={"ID":"457d1440-b56e-496a-82a1-89d661eadc8e","Type":"ContainerStarted","Data":"bc01a39ce12a9abb4c26c95a05a9a0d55e3d34abf42d4c56d53dc9b260550ff7"}
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.866232 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:46 crc kubenswrapper[4702]: I1124 17:59:46.870452 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e50e827d-eb1f-4401-80ca-c1b59cb02e75-memberlist\") pod \"speaker-2xjjq\" (UID: \"e50e827d-eb1f-4401-80ca-c1b59cb02e75\") " pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:47 crc kubenswrapper[4702]: I1124 17:59:47.053473 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:47 crc kubenswrapper[4702]: W1124 17:59:47.076344 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode50e827d_eb1f_4401_80ca_c1b59cb02e75.slice/crio-1c43363bcaf57177bcfe46c83316bb59c13cd50f33b59cde0b43e822a7461357 WatchSource:0}: Error finding container 1c43363bcaf57177bcfe46c83316bb59c13cd50f33b59cde0b43e822a7461357: Status 404 returned error can't find the container with id 1c43363bcaf57177bcfe46c83316bb59c13cd50f33b59cde0b43e822a7461357
Nov 24 17:59:47 crc kubenswrapper[4702]: I1124 17:59:47.476501 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2xjjq" event={"ID":"e50e827d-eb1f-4401-80ca-c1b59cb02e75","Type":"ContainerStarted","Data":"ad5aae07bbd89eb2990d564b1d278f205a0e7af4ed93a39fcc1dbb68d5d6b1ef"}
Nov 24 17:59:47 crc kubenswrapper[4702]: I1124 17:59:47.476542 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2xjjq" event={"ID":"e50e827d-eb1f-4401-80ca-c1b59cb02e75","Type":"ContainerStarted","Data":"1c43363bcaf57177bcfe46c83316bb59c13cd50f33b59cde0b43e822a7461357"}
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.492183 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2xjjq" event={"ID":"e50e827d-eb1f-4401-80ca-c1b59cb02e75","Type":"ContainerStarted","Data":"dd3fe2c1ae178602962cc6de9b89de5cd1d097afb3cc9af4a861a1917366ae6d"}
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.492792 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-2xjjq"
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.493679 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6m56b" event={"ID":"a5e78bfd-acd0-4668-9bc1-7e2f91859d00","Type":"ContainerStarted","Data":"133df797bf121fe4fe8e1c450d4d2597b23507b9ef635e7a38478fbd46b2a60b"}
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.493819 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.516409 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-2xjjq" podStartSLOduration=3.2629534749999998 podStartE2EDuration="4.516393816s" podCreationTimestamp="2025-11-24 17:59:45 +0000 UTC" firstStartedPulling="2025-11-24 17:59:47.377885081 +0000 UTC m=+676.618626255" lastFinishedPulling="2025-11-24 17:59:48.631325432 +0000 UTC m=+677.872066596" observedRunningTime="2025-11-24 17:59:49.51089231 +0000 UTC m=+678.751633484" watchObservedRunningTime="2025-11-24 17:59:49.516393816 +0000 UTC m=+678.757134980"
Nov 24 17:59:49 crc kubenswrapper[4702]: I1124 17:59:49.531523 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-6m56b" podStartSLOduration=1.758551746 podStartE2EDuration="4.531508297s" podCreationTimestamp="2025-11-24 17:59:45 +0000 UTC" firstStartedPulling="2025-11-24 17:59:45.855162646 +0000 UTC m=+675.095903810" lastFinishedPulling="2025-11-24 17:59:48.628119197 +0000 UTC m=+677.868860361" observedRunningTime="2025-11-24 17:59:49.527656244 +0000 UTC m=+678.768397428" watchObservedRunningTime="2025-11-24 17:59:49.531508297 +0000 UTC m=+678.772249471"
Nov 24 17:59:52 crc kubenswrapper[4702]: I1124 17:59:52.511837 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2" event={"ID":"457d1440-b56e-496a-82a1-89d661eadc8e","Type":"ContainerStarted","Data":"58c07c63c054b2da3f51d382d231cdedcd1cffe1ab967d187ba106dd9b11d056"}
Nov 24 17:59:52 crc kubenswrapper[4702]: I1124 17:59:52.512468 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 17:59:52 crc kubenswrapper[4702]: I1124 17:59:52.516084 4702 generic.go:334] "Generic (PLEG): container finished" podID="9cba0ae5-b6cc-4ade-b903-c7b28bbaf372" containerID="5c3fa2143ce25cf259fb8dab41b7d284c0ae47893f0d3f3fa4abf09830486adf" exitCode=0
Nov 24 17:59:52 crc kubenswrapper[4702]: I1124 17:59:52.516128 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerDied","Data":"5c3fa2143ce25cf259fb8dab41b7d284c0ae47893f0d3f3fa4abf09830486adf"}
Nov 24 17:59:52 crc kubenswrapper[4702]: I1124 17:59:52.529425 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2" podStartSLOduration=1.359884645 podStartE2EDuration="7.529409836s" podCreationTimestamp="2025-11-24 17:59:45 +0000 UTC" firstStartedPulling="2025-11-24 17:59:45.669595931 +0000 UTC m=+674.910337095" lastFinishedPulling="2025-11-24 17:59:51.839121122 +0000 UTC m=+681.079862286" observedRunningTime="2025-11-24 17:59:52.527130635 +0000 UTC m=+681.767871799" watchObservedRunningTime="2025-11-24 17:59:52.529409836 +0000 UTC m=+681.770151000"
Nov 24 17:59:53 crc kubenswrapper[4702]: I1124 17:59:53.525259 4702 generic.go:334] "Generic (PLEG): container finished" podID="9cba0ae5-b6cc-4ade-b903-c7b28bbaf372" containerID="916b08491ed53d74ade82ddfeeff9776a934800de62c7cd41ce3d12415dbcf67" exitCode=0
Nov 24 17:59:53 crc kubenswrapper[4702]: I1124 17:59:53.525301 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerDied","Data":"916b08491ed53d74ade82ddfeeff9776a934800de62c7cd41ce3d12415dbcf67"}
Nov 24 17:59:54 crc kubenswrapper[4702]: I1124 17:59:54.537830 4702 generic.go:334] "Generic (PLEG): container finished" podID="9cba0ae5-b6cc-4ade-b903-c7b28bbaf372" containerID="7e54936a2e83719012afbcdaa75cc106b8535264702efc846f47433f811af938" exitCode=0
Nov 24 17:59:54 crc kubenswrapper[4702]: I1124 17:59:54.537933 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerDied","Data":"7e54936a2e83719012afbcdaa75cc106b8535264702efc846f47433f811af938"}
Nov 24 17:59:55 crc kubenswrapper[4702]: I1124 17:59:55.546846 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"124129e08d3dd38575ddf8b823345a880ef3fe30b41168accc05b10edba31507"}
Nov 24 17:59:55 crc kubenswrapper[4702]: I1124 17:59:55.547148 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"b1de64e9c32a49d5c7d55ee1084e8a732af679b8ddf167d3bc0b063935b9c4bb"}
Nov 24 17:59:55 crc kubenswrapper[4702]: I1124 17:59:55.547165 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"0cdb48d679f12f22d5a91845f7bf3dd126c6fa5900f06802cc80f74b24972cc1"}
Nov 24 17:59:55 crc kubenswrapper[4702]: I1124 17:59:55.547179 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"944d1793615cb396d1f1856382da848f55cff79960721f3c6949a619ec39edec"}
Nov 24 17:59:55 crc kubenswrapper[4702]: I1124 17:59:55.571654 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-6m56b"
Nov 24 17:59:56 crc kubenswrapper[4702]: I1124 17:59:56.561275 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"a3cd6ece691eddeb5f258ddde0f4a054c270088c02657694f1d4919172f42818"}
Nov 24 17:59:56 crc kubenswrapper[4702]: I1124 17:59:56.561335 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lr7vm" event={"ID":"9cba0ae5-b6cc-4ade-b903-c7b28bbaf372","Type":"ContainerStarted","Data":"67a0c78b5aa060b6e2606c51a666dd033534edd1d9fd36c55b39173df8ee42de"}
Nov 24 17:59:56 crc kubenswrapper[4702]: I1124 17:59:56.561509 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 17:59:56 crc kubenswrapper[4702]: I1124 17:59:56.592828 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-lr7vm" podStartSLOduration=5.9119135400000005 podStartE2EDuration="11.592792907s" podCreationTimestamp="2025-11-24 17:59:45 +0000 UTC" firstStartedPulling="2025-11-24 17:59:46.151848975 +0000 UTC m=+675.392590139" lastFinishedPulling="2025-11-24 17:59:51.832728342 +0000 UTC m=+681.073469506" observedRunningTime="2025-11-24 17:59:56.590894528 +0000 UTC m=+685.831635772" watchObservedRunningTime="2025-11-24 17:59:56.592792907 +0000 UTC m=+685.833534071"
Nov 24 17:59:57 crc kubenswrapper[4702]: I1124 17:59:57.056700 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-2xjjq"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.128717 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"]
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.129873 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.132091 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.132610 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.135288 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.135399 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.135460 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvp2w\" (UniqueName: \"kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.138080 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"]
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.237339 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.237408 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvp2w\" (UniqueName: \"kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.237465 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.238505 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.244333 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.253666 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvp2w\" (UniqueName: \"kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w\") pod \"collect-profiles-29400120-qxgt8\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.448826 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:00 crc kubenswrapper[4702]: I1124 18:00:00.847564 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"]
Nov 24 18:00:00 crc kubenswrapper[4702]: W1124 18:00:00.850621 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8924d273_4105_4640_9f7b_ae9278125ff2.slice/crio-fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8 WatchSource:0}: Error finding container fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8: Status 404 returned error can't find the container with id fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8
Nov 24 18:00:01 crc kubenswrapper[4702]: I1124 18:00:01.056784 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 18:00:01 crc kubenswrapper[4702]: I1124 18:00:01.107193 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 18:00:01 crc kubenswrapper[4702]: I1124 18:00:01.594954 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8" event={"ID":"8924d273-4105-4640-9f7b-ae9278125ff2","Type":"ContainerStarted","Data":"fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8"}
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.119554 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.120290 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.122300 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.122450 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.124048 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-ch4b9"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.141254 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.274055 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx2lt\" (UniqueName: \"kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt\") pod \"mariadb-operator-index-cgtk8\" (UID: \"21f94815-124b-47d7-8e74-c05b17eda868\") " pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.375049 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx2lt\" (UniqueName: \"kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt\") pod \"mariadb-operator-index-cgtk8\" (UID: \"21f94815-124b-47d7-8e74-c05b17eda868\") " pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.391919 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx2lt\" (UniqueName: \"kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt\") pod \"mariadb-operator-index-cgtk8\" (UID: \"21f94815-124b-47d7-8e74-c05b17eda868\") " pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.443891 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:03 crc kubenswrapper[4702]: I1124 18:00:03.810653 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:03 crc kubenswrapper[4702]: W1124 18:00:03.814520 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21f94815_124b_47d7_8e74_c05b17eda868.slice/crio-f460b4df6612629c93e6066088a389ddb80284cbef1830f4a4c5feb18f7e691c WatchSource:0}: Error finding container f460b4df6612629c93e6066088a389ddb80284cbef1830f4a4c5feb18f7e691c: Status 404 returned error can't find the container with id f460b4df6612629c93e6066088a389ddb80284cbef1830f4a4c5feb18f7e691c
Nov 24 18:00:04 crc kubenswrapper[4702]: I1124 18:00:04.613296 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-cgtk8" event={"ID":"21f94815-124b-47d7-8e74-c05b17eda868","Type":"ContainerStarted","Data":"f460b4df6612629c93e6066088a389ddb80284cbef1830f4a4c5feb18f7e691c"}
Nov 24 18:00:04 crc kubenswrapper[4702]: I1124 18:00:04.615020 4702 generic.go:334] "Generic (PLEG): container finished" podID="8924d273-4105-4640-9f7b-ae9278125ff2" containerID="3abfa74e52cd34490434643fc751c60d126a613d4cdccee8393144411f183415" exitCode=0
Nov 24 18:00:04 crc kubenswrapper[4702]: I1124 18:00:04.615054 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8" event={"ID":"8924d273-4105-4640-9f7b-ae9278125ff2","Type":"ContainerDied","Data":"3abfa74e52cd34490434643fc751c60d126a613d4cdccee8393144411f183415"}
Nov 24 18:00:05 crc kubenswrapper[4702]: I1124 18:00:05.486633 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-snxq2"
Nov 24 18:00:05 crc kubenswrapper[4702]: I1124 18:00:05.627831 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-cgtk8" event={"ID":"21f94815-124b-47d7-8e74-c05b17eda868","Type":"ContainerStarted","Data":"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"}
Nov 24 18:00:05 crc kubenswrapper[4702]: I1124 18:00:05.642709 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-cgtk8" podStartSLOduration=1.561456693 podStartE2EDuration="2.642691573s" podCreationTimestamp="2025-11-24 18:00:03 +0000 UTC" firstStartedPulling="2025-11-24 18:00:03.816426039 +0000 UTC m=+693.057167203" lastFinishedPulling="2025-11-24 18:00:04.897660919 +0000 UTC m=+694.138402083" observedRunningTime="2025-11-24 18:00:05.641018469 +0000 UTC m=+694.881759633" watchObservedRunningTime="2025-11-24 18:00:05.642691573 +0000 UTC m=+694.883432737"
Nov 24 18:00:05 crc kubenswrapper[4702]: I1124 18:00:05.866867 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.007244 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume\") pod \"8924d273-4105-4640-9f7b-ae9278125ff2\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") "
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.007322 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvp2w\" (UniqueName: \"kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w\") pod \"8924d273-4105-4640-9f7b-ae9278125ff2\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") "
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.007384 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume\") pod \"8924d273-4105-4640-9f7b-ae9278125ff2\" (UID: \"8924d273-4105-4640-9f7b-ae9278125ff2\") "
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.008089 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume" (OuterVolumeSpecName: "config-volume") pod "8924d273-4105-4640-9f7b-ae9278125ff2" (UID: "8924d273-4105-4640-9f7b-ae9278125ff2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.012145 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8924d273-4105-4640-9f7b-ae9278125ff2" (UID: "8924d273-4105-4640-9f7b-ae9278125ff2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.012242 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w" (OuterVolumeSpecName: "kube-api-access-rvp2w") pod "8924d273-4105-4640-9f7b-ae9278125ff2" (UID: "8924d273-4105-4640-9f7b-ae9278125ff2"). InnerVolumeSpecName "kube-api-access-rvp2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.059613 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-lr7vm"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.108161 4702 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8924d273-4105-4640-9f7b-ae9278125ff2-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.108193 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvp2w\" (UniqueName: \"kubernetes.io/projected/8924d273-4105-4640-9f7b-ae9278125ff2-kube-api-access-rvp2w\") on node \"crc\" DevicePath \"\""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.108203 4702 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8924d273-4105-4640-9f7b-ae9278125ff2-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.304632 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.636188 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8" event={"ID":"8924d273-4105-4640-9f7b-ae9278125ff2","Type":"ContainerDied","Data":"fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8"}
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.636224 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fabba105c82295dfc349823dabf90c771f8e786309883dc80109180fd150bce8"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.636239 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-qxgt8"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.907873 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-bnffm"]
Nov 24 18:00:06 crc kubenswrapper[4702]: E1124 18:00:06.908157 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8924d273-4105-4640-9f7b-ae9278125ff2" containerName="collect-profiles"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.908181 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="8924d273-4105-4640-9f7b-ae9278125ff2" containerName="collect-profiles"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.908334 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="8924d273-4105-4640-9f7b-ae9278125ff2" containerName="collect-profiles"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.908762 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:06 crc kubenswrapper[4702]: I1124 18:00:06.915658 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-bnffm"]
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.018718 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xww5b\" (UniqueName: \"kubernetes.io/projected/dfce7a84-c113-4ba0-a101-899a5a2b2140-kube-api-access-xww5b\") pod \"mariadb-operator-index-bnffm\" (UID: \"dfce7a84-c113-4ba0-a101-899a5a2b2140\") " pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.119925 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xww5b\" (UniqueName: \"kubernetes.io/projected/dfce7a84-c113-4ba0-a101-899a5a2b2140-kube-api-access-xww5b\") pod \"mariadb-operator-index-bnffm\" (UID: \"dfce7a84-c113-4ba0-a101-899a5a2b2140\") " pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.137861 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xww5b\" (UniqueName: \"kubernetes.io/projected/dfce7a84-c113-4ba0-a101-899a5a2b2140-kube-api-access-xww5b\") pod \"mariadb-operator-index-bnffm\" (UID: \"dfce7a84-c113-4ba0-a101-899a5a2b2140\") " pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.222755 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.641637 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-bnffm"]
Nov 24 18:00:07 crc kubenswrapper[4702]: I1124 18:00:07.642081 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-cgtk8" podUID="21f94815-124b-47d7-8e74-c05b17eda868" containerName="registry-server" containerID="cri-o://f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1" gracePeriod=2
Nov 24 18:00:07 crc kubenswrapper[4702]: W1124 18:00:07.657016 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddfce7a84_c113_4ba0_a101_899a5a2b2140.slice/crio-afdba346992534def92f77e613c26c7d4b266dfe2d6db4e8b66ff45839e5f01b WatchSource:0}: Error finding container afdba346992534def92f77e613c26c7d4b266dfe2d6db4e8b66ff45839e5f01b: Status 404 returned error can't find the container with id afdba346992534def92f77e613c26c7d4b266dfe2d6db4e8b66ff45839e5f01b
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.120874 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.232460 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx2lt\" (UniqueName: \"kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt\") pod \"21f94815-124b-47d7-8e74-c05b17eda868\" (UID: \"21f94815-124b-47d7-8e74-c05b17eda868\") "
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.242417 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt" (OuterVolumeSpecName: "kube-api-access-vx2lt") pod "21f94815-124b-47d7-8e74-c05b17eda868" (UID: "21f94815-124b-47d7-8e74-c05b17eda868"). InnerVolumeSpecName "kube-api-access-vx2lt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.334483 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx2lt\" (UniqueName: \"kubernetes.io/projected/21f94815-124b-47d7-8e74-c05b17eda868-kube-api-access-vx2lt\") on node \"crc\" DevicePath \"\""
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.649652 4702 generic.go:334] "Generic (PLEG): container finished" podID="21f94815-124b-47d7-8e74-c05b17eda868" containerID="f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1" exitCode=0
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.649723 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-cgtk8"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.649713 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-cgtk8" event={"ID":"21f94815-124b-47d7-8e74-c05b17eda868","Type":"ContainerDied","Data":"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"}
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.649889 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-cgtk8" event={"ID":"21f94815-124b-47d7-8e74-c05b17eda868","Type":"ContainerDied","Data":"f460b4df6612629c93e6066088a389ddb80284cbef1830f4a4c5feb18f7e691c"}
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.649925 4702 scope.go:117] "RemoveContainer" containerID="f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.652401 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bnffm" event={"ID":"dfce7a84-c113-4ba0-a101-899a5a2b2140","Type":"ContainerStarted","Data":"bf2b2da9ca1146d7e38e4c2fac5e6b1a339382deb2a12d5f1e5a6b301775bd8b"}
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.652448 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bnffm" event={"ID":"dfce7a84-c113-4ba0-a101-899a5a2b2140","Type":"ContainerStarted","Data":"afdba346992534def92f77e613c26c7d4b266dfe2d6db4e8b66ff45839e5f01b"}
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.668277 4702 scope.go:117] "RemoveContainer" containerID="f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"
Nov 24 18:00:08 crc kubenswrapper[4702]: E1124 18:00:08.668992 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1\": container with ID starting with f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1 not found: ID does not exist" containerID="f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.669039 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1"} err="failed to get container status \"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1\": rpc error: code = NotFound desc = could not find container \"f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1\": container with ID starting with f52f79070dc23f1cd540a493ec11d6fa68e7ffbe79f04714fc698db840e638d1 not found: ID does not exist"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.675991 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-bnffm" podStartSLOduration=2.025208663 podStartE2EDuration="2.67596618s" podCreationTimestamp="2025-11-24 18:00:06 +0000 UTC" firstStartedPulling="2025-11-24 18:00:07.662069673 +0000 UTC m=+696.902810837" lastFinishedPulling="2025-11-24 18:00:08.31282719 +0000 UTC m=+697.553568354" observedRunningTime="2025-11-24 18:00:08.673125784 +0000 UTC m=+697.913866948" watchObservedRunningTime="2025-11-24 18:00:08.67596618 +0000 UTC m=+697.916707374"
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.685785 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:08 crc kubenswrapper[4702]: I1124 18:00:08.690587 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-cgtk8"]
Nov 24 18:00:09 crc kubenswrapper[4702]: I1124 18:00:09.655059 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21f94815-124b-47d7-8e74-c05b17eda868" path="/var/lib/kubelet/pods/21f94815-124b-47d7-8e74-c05b17eda868/volumes"
Nov 24 18:00:17 crc kubenswrapper[4702]: I1124 18:00:17.223716 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:17 crc kubenswrapper[4702]: I1124 18:00:17.224395 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:17 crc kubenswrapper[4702]: I1124 18:00:17.253002 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:17 crc kubenswrapper[4702]: I1124 18:00:17.742151 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-bnffm"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.948256 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"]
Nov 24 18:00:18 crc kubenswrapper[4702]: E1124 18:00:18.948480 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21f94815-124b-47d7-8e74-c05b17eda868" containerName="registry-server"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.948492 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="21f94815-124b-47d7-8e74-c05b17eda868" containerName="registry-server"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.948586 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="21f94815-124b-47d7-8e74-c05b17eda868" containerName="registry-server"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.949311 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.952461 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf"
Nov 24 18:00:18 crc kubenswrapper[4702]: I1124 18:00:18.977673 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"]
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.068197 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b68q\" (UniqueName: \"kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.068259 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.068574 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.171055 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b68q\" (UniqueName: \"kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.171181 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.171216 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"
Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.172046 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.172146 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.192631 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b68q\" (UniqueName: \"kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q\") pod \"7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.281093 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.504566 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p"] Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.714001 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerStarted","Data":"624e9c3cd6df7a5c4e9c11a685435ccde73fa821ef632f65e525966b1e3e6c3a"} Nov 24 18:00:19 crc kubenswrapper[4702]: I1124 18:00:19.714519 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerStarted","Data":"1c6fad7ce0248f91b82265b6c7f63d2342fd613929379ddef7a6d48a1a9601d8"} Nov 24 18:00:20 crc kubenswrapper[4702]: I1124 18:00:20.722114 4702 generic.go:334] "Generic (PLEG): container finished" podID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerID="624e9c3cd6df7a5c4e9c11a685435ccde73fa821ef632f65e525966b1e3e6c3a" exitCode=0 Nov 24 18:00:20 crc kubenswrapper[4702]: I1124 18:00:20.722157 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerDied","Data":"624e9c3cd6df7a5c4e9c11a685435ccde73fa821ef632f65e525966b1e3e6c3a"} Nov 24 18:00:22 crc kubenswrapper[4702]: I1124 18:00:22.483293 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:00:22 crc kubenswrapper[4702]: I1124 18:00:22.484011 4702 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:00:23 crc kubenswrapper[4702]: I1124 18:00:23.747574 4702 generic.go:334] "Generic (PLEG): container finished" podID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerID="9d475046ac70e49c44833f98d8cf01a4fbc5dd4a3996a172938c5f19f200d7ce" exitCode=0 Nov 24 18:00:23 crc kubenswrapper[4702]: I1124 18:00:23.747628 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerDied","Data":"9d475046ac70e49c44833f98d8cf01a4fbc5dd4a3996a172938c5f19f200d7ce"} Nov 24 18:00:24 crc kubenswrapper[4702]: I1124 18:00:24.757150 4702 generic.go:334] "Generic (PLEG): container finished" podID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerID="5f8feeea15ab4c614e6a04446a8c1820b24dd7f321a279c5c04a7fe22cf51d37" exitCode=0 Nov 24 18:00:24 crc kubenswrapper[4702]: I1124 18:00:24.757280 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerDied","Data":"5f8feeea15ab4c614e6a04446a8c1820b24dd7f321a279c5c04a7fe22cf51d37"} Nov 24 18:00:25 crc kubenswrapper[4702]: I1124 18:00:25.987031 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.159188 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5b68q\" (UniqueName: \"kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q\") pod \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.159333 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle\") pod \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.159411 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util\") pod \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\" (UID: \"6b34ab2f-e0b1-429e-8b6d-7b8809671888\") " Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.160587 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle" (OuterVolumeSpecName: "bundle") pod "6b34ab2f-e0b1-429e-8b6d-7b8809671888" (UID: "6b34ab2f-e0b1-429e-8b6d-7b8809671888"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.164265 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q" (OuterVolumeSpecName: "kube-api-access-5b68q") pod "6b34ab2f-e0b1-429e-8b6d-7b8809671888" (UID: "6b34ab2f-e0b1-429e-8b6d-7b8809671888"). InnerVolumeSpecName "kube-api-access-5b68q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.169038 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util" (OuterVolumeSpecName: "util") pod "6b34ab2f-e0b1-429e-8b6d-7b8809671888" (UID: "6b34ab2f-e0b1-429e-8b6d-7b8809671888"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.260507 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5b68q\" (UniqueName: \"kubernetes.io/projected/6b34ab2f-e0b1-429e-8b6d-7b8809671888-kube-api-access-5b68q\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.260548 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.260562 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b34ab2f-e0b1-429e-8b6d-7b8809671888-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.772433 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" event={"ID":"6b34ab2f-e0b1-429e-8b6d-7b8809671888","Type":"ContainerDied","Data":"1c6fad7ce0248f91b82265b6c7f63d2342fd613929379ddef7a6d48a1a9601d8"} Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.772487 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c6fad7ce0248f91b82265b6c7f63d2342fd613929379ddef7a6d48a1a9601d8" Nov 24 18:00:26 crc kubenswrapper[4702]: I1124 18:00:26.772498 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.606206 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w"] Nov 24 18:00:32 crc kubenswrapper[4702]: E1124 18:00:32.607677 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="pull" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.607758 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="pull" Nov 24 18:00:32 crc kubenswrapper[4702]: E1124 18:00:32.607858 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="util" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.607922 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="util" Nov 24 18:00:32 crc kubenswrapper[4702]: E1124 18:00:32.607976 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="extract" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.608028 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="extract" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.608174 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b34ab2f-e0b1-429e-8b6d-7b8809671888" containerName="extract" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.608752 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.611094 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.611195 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.611646 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-lcv44" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.620216 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w"] Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.643463 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-webhook-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.643517 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-apiservice-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " 
pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.643561 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4swx\" (UniqueName: \"kubernetes.io/projected/967cb8ea-a50c-409e-8b5e-f91ae596762c-kube-api-access-s4swx\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.744463 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-webhook-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.744528 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-apiservice-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.744585 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4swx\" (UniqueName: \"kubernetes.io/projected/967cb8ea-a50c-409e-8b5e-f91ae596762c-kube-api-access-s4swx\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.750171 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-webhook-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.751045 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/967cb8ea-a50c-409e-8b5e-f91ae596762c-apiservice-cert\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.764489 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4swx\" (UniqueName: \"kubernetes.io/projected/967cb8ea-a50c-409e-8b5e-f91ae596762c-kube-api-access-s4swx\") pod \"mariadb-operator-controller-manager-74dcfc55d5-fj98w\" (UID: \"967cb8ea-a50c-409e-8b5e-f91ae596762c\") " pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:32 crc kubenswrapper[4702]: I1124 18:00:32.927837 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:33 crc kubenswrapper[4702]: I1124 18:00:33.143882 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w"] Nov 24 18:00:33 crc kubenswrapper[4702]: I1124 18:00:33.813302 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" event={"ID":"967cb8ea-a50c-409e-8b5e-f91ae596762c","Type":"ContainerStarted","Data":"e950185af368c7156d5c2f9d7963bee543b4238fd62639d07b044321c4db51a9"} Nov 24 18:00:36 crc kubenswrapper[4702]: I1124 18:00:36.829595 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" event={"ID":"967cb8ea-a50c-409e-8b5e-f91ae596762c","Type":"ContainerStarted","Data":"162c8057f68bbd6e86d898b5725ba2af99c79640f43a561d7fb90dd0a4394ff7"} Nov 24 18:00:38 crc kubenswrapper[4702]: I1124 18:00:38.840141 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" event={"ID":"967cb8ea-a50c-409e-8b5e-f91ae596762c","Type":"ContainerStarted","Data":"947ae3a9719c6fdb61c036c2b7a0d0a6373afe782a5f77512565f2bca5a7ce51"} Nov 24 18:00:38 crc kubenswrapper[4702]: I1124 18:00:38.840577 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:38 crc kubenswrapper[4702]: I1124 18:00:38.857881 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" podStartSLOduration=1.787522544 podStartE2EDuration="6.857857298s" podCreationTimestamp="2025-11-24 18:00:32 +0000 UTC" firstStartedPulling="2025-11-24 18:00:33.155015568 +0000 UTC m=+722.395756732" lastFinishedPulling="2025-11-24 18:00:38.225350322 +0000 UTC m=+727.466091486" observedRunningTime="2025-11-24 18:00:38.856140853 +0000 UTC m=+728.096882037" watchObservedRunningTime="2025-11-24 18:00:38.857857298 +0000 UTC m=+728.098598472" Nov 24 18:00:42 crc kubenswrapper[4702]: I1124 18:00:42.932404 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-74dcfc55d5-fj98w" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.253168 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.254305 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.256321 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-c559m" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.270978 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.409377 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mkpv\" (UniqueName: \"kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv\") pod \"infra-operator-index-4kfv9\" (UID: \"b7bf52d1-de85-46f5-8951-0e36986e0063\") " pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.510450 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mkpv\" (UniqueName: \"kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv\") pod \"infra-operator-index-4kfv9\" (UID: \"b7bf52d1-de85-46f5-8951-0e36986e0063\") " pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.528210 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mkpv\" (UniqueName: \"kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv\") pod \"infra-operator-index-4kfv9\" (UID: \"b7bf52d1-de85-46f5-8951-0e36986e0063\") " pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.572735 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.848151 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:45 crc kubenswrapper[4702]: I1124 18:00:45.877066 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-4kfv9" event={"ID":"b7bf52d1-de85-46f5-8951-0e36986e0063","Type":"ContainerStarted","Data":"28b11142b5e13011e57b28a389a61e3a0daf8676f2ed22839c26a5326f794f52"} Nov 24 18:00:46 crc kubenswrapper[4702]: I1124 18:00:46.883056 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-4kfv9" event={"ID":"b7bf52d1-de85-46f5-8951-0e36986e0063","Type":"ContainerStarted","Data":"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e"} Nov 24 18:00:46 crc kubenswrapper[4702]: I1124 18:00:46.901812 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-4kfv9" podStartSLOduration=1.107494273 podStartE2EDuration="1.901785165s" podCreationTimestamp="2025-11-24 18:00:45 +0000 UTC" firstStartedPulling="2025-11-24 18:00:45.865531009 +0000 UTC m=+735.106272173" lastFinishedPulling="2025-11-24 18:00:46.659821901 +0000 UTC m=+735.900563065" observedRunningTime="2025-11-24 18:00:46.900780949 +0000 UTC m=+736.141522123" watchObservedRunningTime="2025-11-24 18:00:46.901785165 +0000 UTC m=+736.142526329" Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.238512 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.845577 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-pcdn7"] Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.846426 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.857548 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-pcdn7"] Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.896072 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-4kfv9" podUID="b7bf52d1-de85-46f5-8951-0e36986e0063" containerName="registry-server" containerID="cri-o://07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e" gracePeriod=2 Nov 24 18:00:48 crc kubenswrapper[4702]: I1124 18:00:48.950760 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88xnm\" (UniqueName: \"kubernetes.io/projected/c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18-kube-api-access-88xnm\") pod \"infra-operator-index-pcdn7\" (UID: \"c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18\") " pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.051673 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88xnm\" (UniqueName: \"kubernetes.io/projected/c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18-kube-api-access-88xnm\") pod \"infra-operator-index-pcdn7\" (UID: \"c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18\") " pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.075978 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88xnm\" (UniqueName: \"kubernetes.io/projected/c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18-kube-api-access-88xnm\") pod \"infra-operator-index-pcdn7\" (UID: \"c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18\") " pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.162885 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.246083 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.333917 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-pcdn7"] Nov 24 18:00:49 crc kubenswrapper[4702]: W1124 18:00:49.342927 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5cf41b8_c818_4c3d_9b5c_e19a1eab4c18.slice/crio-1b6c01d72797de122e37a74171d0bf03f83900f20011913f8d985235a1408a4c WatchSource:0}: Error finding container 1b6c01d72797de122e37a74171d0bf03f83900f20011913f8d985235a1408a4c: Status 404 returned error can't find the container with id 1b6c01d72797de122e37a74171d0bf03f83900f20011913f8d985235a1408a4c Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.357976 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mkpv\" (UniqueName: \"kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv\") pod \"b7bf52d1-de85-46f5-8951-0e36986e0063\" (UID: \"b7bf52d1-de85-46f5-8951-0e36986e0063\") " Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.361823 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv" (OuterVolumeSpecName: "kube-api-access-2mkpv") pod "b7bf52d1-de85-46f5-8951-0e36986e0063" (UID: "b7bf52d1-de85-46f5-8951-0e36986e0063"). InnerVolumeSpecName "kube-api-access-2mkpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.459032 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mkpv\" (UniqueName: \"kubernetes.io/projected/b7bf52d1-de85-46f5-8951-0e36986e0063-kube-api-access-2mkpv\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.903071 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-pcdn7" event={"ID":"c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18","Type":"ContainerStarted","Data":"1b6c01d72797de122e37a74171d0bf03f83900f20011913f8d985235a1408a4c"} Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.904558 4702 generic.go:334] "Generic (PLEG): container finished" podID="b7bf52d1-de85-46f5-8951-0e36986e0063" containerID="07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e" exitCode=0 Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.904583 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-4kfv9" event={"ID":"b7bf52d1-de85-46f5-8951-0e36986e0063","Type":"ContainerDied","Data":"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e"} Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.904598 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-4kfv9" event={"ID":"b7bf52d1-de85-46f5-8951-0e36986e0063","Type":"ContainerDied","Data":"28b11142b5e13011e57b28a389a61e3a0daf8676f2ed22839c26a5326f794f52"} Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.904617 4702 scope.go:117] "RemoveContainer" containerID="07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.904644 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-4kfv9" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.920298 4702 scope.go:117] "RemoveContainer" containerID="07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e" Nov 24 18:00:49 crc kubenswrapper[4702]: E1124 18:00:49.921009 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e\": container with ID starting with 07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e not found: ID does not exist" containerID="07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.921046 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e"} err="failed to get container status \"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e\": rpc error: code = NotFound desc = could not find container \"07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e\": container with ID starting with 07355ec4430d7cafebe24f0a9f7904549e6c6ba64f5f3387debee5969f021e0e not found: ID does not exist" Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.922032 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:49 crc kubenswrapper[4702]: I1124 18:00:49.924888 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-4kfv9"] Nov 24 18:00:50 crc kubenswrapper[4702]: I1124 18:00:50.915225 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-pcdn7" event={"ID":"c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18","Type":"ContainerStarted","Data":"b1f27644452d51892baf251ed788a09e18255ae571f86ebf26036aadc04c9eaf"} Nov 24 18:00:51 crc kubenswrapper[4702]: I1124 18:00:51.658768 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7bf52d1-de85-46f5-8951-0e36986e0063" path="/var/lib/kubelet/pods/b7bf52d1-de85-46f5-8951-0e36986e0063/volumes" Nov 24 18:00:52 crc kubenswrapper[4702]: I1124 18:00:52.483239 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:00:52 crc kubenswrapper[4702]: I1124 18:00:52.483683 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:00:59 crc kubenswrapper[4702]: I1124 18:00:59.163784 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:59 crc kubenswrapper[4702]: I1124 18:00:59.165238 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:00:59 crc kubenswrapper[4702]: I1124 18:00:59.190215 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 
18:00:59 crc kubenswrapper[4702]: I1124 18:00:59.203502 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-pcdn7" podStartSLOduration=10.340494671 podStartE2EDuration="11.203488649s" podCreationTimestamp="2025-11-24 18:00:48 +0000 UTC" firstStartedPulling="2025-11-24 18:00:49.346704334 +0000 UTC m=+738.587445498" lastFinishedPulling="2025-11-24 18:00:50.209698292 +0000 UTC m=+739.450439476" observedRunningTime="2025-11-24 18:00:50.930106899 +0000 UTC m=+740.170848063" watchObservedRunningTime="2025-11-24 18:00:59.203488649 +0000 UTC m=+748.444229823" Nov 24 18:00:59 crc kubenswrapper[4702]: I1124 18:00:59.990719 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-pcdn7" Nov 24 18:01:06 crc kubenswrapper[4702]: I1124 18:01:06.772039 4702 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.289957 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5"] Nov 24 18:01:08 crc kubenswrapper[4702]: E1124 18:01:08.290198 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7bf52d1-de85-46f5-8951-0e36986e0063" containerName="registry-server" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.290210 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7bf52d1-de85-46f5-8951-0e36986e0063" containerName="registry-server" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.290318 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7bf52d1-de85-46f5-8951-0e36986e0063" containerName="registry-server" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.291125 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.293365 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.302218 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5"] Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.401964 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7cbn\" (UniqueName: \"kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.402159 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.402199 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.503522 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.503572 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.503626 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7cbn\" (UniqueName: \"kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.504131 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.504202 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.527285 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7cbn\" (UniqueName: \"kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:08 crc kubenswrapper[4702]: I1124 18:01:08.606928 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:09 crc kubenswrapper[4702]: I1124 18:01:09.023225 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5"] Nov 24 18:01:10 crc kubenswrapper[4702]: I1124 18:01:10.017860 4702 generic.go:334] "Generic (PLEG): container finished" podID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerID="763541b456f6cb9baf0bfb78c73fb1a7c3554d15df8987ddfa39a5e6ad57ce21" exitCode=0 Nov 24 18:01:10 crc kubenswrapper[4702]: I1124 18:01:10.017905 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" event={"ID":"f97e8ad2-ce08-473a-b864-41b444e9fe49","Type":"ContainerDied","Data":"763541b456f6cb9baf0bfb78c73fb1a7c3554d15df8987ddfa39a5e6ad57ce21"} Nov 24 18:01:10 crc kubenswrapper[4702]: I1124 18:01:10.017931 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" event={"ID":"f97e8ad2-ce08-473a-b864-41b444e9fe49","Type":"ContainerStarted","Data":"8e469021c9d24a49fae08b573e656a6dd22a6ee2e793ba8ee5488aa12b618905"} Nov 24 18:01:11 crc kubenswrapper[4702]: I1124 18:01:11.026454 4702 generic.go:334] "Generic (PLEG): container finished" podID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerID="0aeb678a63a1a29f4627124eed882979bae996fe2e93cf959295c071e683efa2" exitCode=0 Nov 24 18:01:11 crc kubenswrapper[4702]: I1124 18:01:11.026606 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" event={"ID":"f97e8ad2-ce08-473a-b864-41b444e9fe49","Type":"ContainerDied","Data":"0aeb678a63a1a29f4627124eed882979bae996fe2e93cf959295c071e683efa2"} Nov 24 18:01:12 crc kubenswrapper[4702]: I1124 18:01:12.034055 4702 generic.go:334] "Generic (PLEG): container finished" podID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerID="549f39f9c6459471caa2bee7eda4fa3481a3a8ca101d0ed0aaf695506acb57ab" exitCode=0 Nov 24 18:01:12 crc kubenswrapper[4702]: I1124 18:01:12.034105 4702 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" event={"ID":"f97e8ad2-ce08-473a-b864-41b444e9fe49","Type":"ContainerDied","Data":"549f39f9c6459471caa2bee7eda4fa3481a3a8ca101d0ed0aaf695506acb57ab"} Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.259481 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.262335 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle\") pod \"f97e8ad2-ce08-473a-b864-41b444e9fe49\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.262419 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7cbn\" (UniqueName: \"kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn\") pod \"f97e8ad2-ce08-473a-b864-41b444e9fe49\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.262453 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util\") pod \"f97e8ad2-ce08-473a-b864-41b444e9fe49\" (UID: \"f97e8ad2-ce08-473a-b864-41b444e9fe49\") " Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.263170 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle" (OuterVolumeSpecName: "bundle") pod "f97e8ad2-ce08-473a-b864-41b444e9fe49" (UID: "f97e8ad2-ce08-473a-b864-41b444e9fe49"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.267429 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn" (OuterVolumeSpecName: "kube-api-access-j7cbn") pod "f97e8ad2-ce08-473a-b864-41b444e9fe49" (UID: "f97e8ad2-ce08-473a-b864-41b444e9fe49"). InnerVolumeSpecName "kube-api-access-j7cbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.277129 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util" (OuterVolumeSpecName: "util") pod "f97e8ad2-ce08-473a-b864-41b444e9fe49" (UID: "f97e8ad2-ce08-473a-b864-41b444e9fe49"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.363466 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.363502 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7cbn\" (UniqueName: \"kubernetes.io/projected/f97e8ad2-ce08-473a-b864-41b444e9fe49-kube-api-access-j7cbn\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:13 crc kubenswrapper[4702]: I1124 18:01:13.363515 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f97e8ad2-ce08-473a-b864-41b444e9fe49-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:14 crc kubenswrapper[4702]: I1124 18:01:14.047291 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" event={"ID":"f97e8ad2-ce08-473a-b864-41b444e9fe49","Type":"ContainerDied","Data":"8e469021c9d24a49fae08b573e656a6dd22a6ee2e793ba8ee5488aa12b618905"} Nov 24 18:01:14 crc kubenswrapper[4702]: I1124 18:01:14.047657 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e469021c9d24a49fae08b573e656a6dd22a6ee2e793ba8ee5488aa12b618905" Nov 24 18:01:14 crc kubenswrapper[4702]: I1124 18:01:14.047348 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.162195 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw"] Nov 24 18:01:18 crc kubenswrapper[4702]: E1124 18:01:18.163531 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="extract" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.163602 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="extract" Nov 24 18:01:18 crc kubenswrapper[4702]: E1124 18:01:18.163682 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="pull" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.163733 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="pull" Nov 24 18:01:18 crc kubenswrapper[4702]: E1124 18:01:18.163870 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="util" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.163923 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="util" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.164056 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="f97e8ad2-ce08-473a-b864-41b444e9fe49" containerName="extract" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.164882 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.166481 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.168188 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-mj27m" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.178242 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw"] Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.325361 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkwln\" (UniqueName: \"kubernetes.io/projected/f235ced9-aab3-4b84-b788-8155ca736b51-kube-api-access-xkwln\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.325763 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-webhook-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.325973 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-apiservice-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.426943 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-apiservice-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.427017 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkwln\" (UniqueName: \"kubernetes.io/projected/f235ced9-aab3-4b84-b788-8155ca736b51-kube-api-access-xkwln\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.427064 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-webhook-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.432915 4702 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-webhook-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.435252 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f235ced9-aab3-4b84-b788-8155ca736b51-apiservice-cert\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.445388 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkwln\" (UniqueName: \"kubernetes.io/projected/f235ced9-aab3-4b84-b788-8155ca736b51-kube-api-access-xkwln\") pod \"infra-operator-controller-manager-6b49b55b86-l7qhw\" (UID: \"f235ced9-aab3-4b84-b788-8155ca736b51\") " pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.482378 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:18 crc kubenswrapper[4702]: I1124 18:01:18.679544 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw"] Nov 24 18:01:18 crc kubenswrapper[4702]: W1124 18:01:18.684118 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf235ced9_aab3_4b84_b788_8155ca736b51.slice/crio-b3ff0b1711512b4be257ffb6ca486866c8283671fbab87c70a7f0e74bd6d7fbf WatchSource:0}: Error finding container b3ff0b1711512b4be257ffb6ca486866c8283671fbab87c70a7f0e74bd6d7fbf: Status 404 returned error can't find the container with id b3ff0b1711512b4be257ffb6ca486866c8283671fbab87c70a7f0e74bd6d7fbf Nov 24 18:01:19 crc kubenswrapper[4702]: I1124 18:01:19.077685 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" event={"ID":"f235ced9-aab3-4b84-b788-8155ca736b51","Type":"ContainerStarted","Data":"b3ff0b1711512b4be257ffb6ca486866c8283671fbab87c70a7f0e74bd6d7fbf"} Nov 24 18:01:21 crc kubenswrapper[4702]: I1124 18:01:21.092916 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" event={"ID":"f235ced9-aab3-4b84-b788-8155ca736b51","Type":"ContainerStarted","Data":"ab1f14731707f2aaead6d0cdd9cfe308f26742afa46cc452ecd326be3c7a3531"} Nov 24 18:01:21 crc kubenswrapper[4702]: I1124 18:01:21.094602 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:21 crc kubenswrapper[4702]: I1124 18:01:21.094690 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" event={"ID":"f235ced9-aab3-4b84-b788-8155ca736b51","Type":"ContainerStarted","Data":"9abfbfaae1a6415bd9e6a1960870abdbb68c274831061d41ed7d27a5656b8185"} Nov 24 18:01:21 crc kubenswrapper[4702]: I1124 18:01:21.116490 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" podStartSLOduration=1.472797315 podStartE2EDuration="3.116470778s" podCreationTimestamp="2025-11-24 18:01:18 +0000 UTC" firstStartedPulling="2025-11-24 18:01:18.686465042 +0000 UTC m=+767.927206206" lastFinishedPulling="2025-11-24 18:01:20.330138505 +0000 UTC m=+769.570879669" observedRunningTime="2025-11-24 18:01:21.113314954 +0000 UTC m=+770.354056148" watchObservedRunningTime="2025-11-24 18:01:21.116470778 +0000 UTC m=+770.357211952" Nov 24 18:01:22 crc kubenswrapper[4702]: I1124 18:01:22.482884 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:01:22 crc kubenswrapper[4702]: I1124 18:01:22.483233 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:01:22 crc kubenswrapper[4702]: I1124 18:01:22.483285 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 18:01:22 crc kubenswrapper[4702]: I1124 18:01:22.483863 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:01:22 crc kubenswrapper[4702]: I1124 18:01:22.483925 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b" gracePeriod=600 Nov 24 18:01:23 crc kubenswrapper[4702]: I1124 18:01:23.105498 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b" exitCode=0 Nov 24 18:01:23 crc kubenswrapper[4702]: I1124 18:01:23.105588 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b"} Nov 24 18:01:23 crc kubenswrapper[4702]: I1124 18:01:23.105868 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c"} Nov 24 18:01:23 crc kubenswrapper[4702]: I1124 18:01:23.105902 4702 scope.go:117] "RemoveContainer" containerID="d8db456ada4d9e9dbcf1eb529e962032d09cb8ae8f934769c1e203a91f3a1815" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.889004 4702 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["glance-kuttl-tests/openstack-galera-0"] Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.890395 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.891984 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"galera-openstack-dockercfg-8nb5b" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.892498 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-config-data" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.892602 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openshift-service-ca.crt" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.894691 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"kube-root-ca.crt" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.896383 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstack-galera-2"] Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.897567 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.899635 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstack-galera-1"] Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.901072 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.902838 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-0"] Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.904915 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-scripts" Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.928872 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-1"] Nov 24 18:01:27 crc kubenswrapper[4702]: I1124 18:01:27.934704 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-2"] Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.041783 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6srp\" (UniqueName: \"kubernetes.io/projected/ea71d590-80f8-45c8-9db2-d163f5516941-kube-api-access-v6srp\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.041870 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhp4x\" (UniqueName: \"kubernetes.io/projected/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kube-api-access-jhp4x\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.041908 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/43554378-0651-47bf-a8b8-658ad5843651-config-data-generated\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc 
kubenswrapper[4702]: I1124 18:01:28.041940 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-kolla-config\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042002 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-default\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042053 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-operator-scripts\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042078 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccvpb\" (UniqueName: \"kubernetes.io/projected/43554378-0651-47bf-a8b8-658ad5843651-kube-api-access-ccvpb\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042107 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042137 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-operator-scripts\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042157 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042185 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-generated\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042206 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " 
pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042233 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-kolla-config\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042246 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-default\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042265 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042286 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042313 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.042334 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-config-data-default\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144126 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-kolla-config\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144170 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-default\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144202 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-operator-scripts\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc 
kubenswrapper[4702]: I1124 18:01:28.144225 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccvpb\" (UniqueName: \"kubernetes.io/projected/43554378-0651-47bf-a8b8-658ad5843651-kube-api-access-ccvpb\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144249 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144278 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-operator-scripts\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144301 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144332 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-generated\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144360 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144388 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-kolla-config\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144409 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-default\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144430 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144454 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144483 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144507 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-config-data-default\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144546 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6srp\" (UniqueName: \"kubernetes.io/projected/ea71d590-80f8-45c8-9db2-d163f5516941-kube-api-access-v6srp\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144568 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhp4x\" (UniqueName: \"kubernetes.io/projected/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kube-api-access-jhp4x\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144592 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/43554378-0651-47bf-a8b8-658ad5843651-config-data-generated\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.144839 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") device mount path \"/mnt/openstack/pv08\"" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.145200 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/43554378-0651-47bf-a8b8-658ad5843651-config-data-generated\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.145208 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.145332 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: 
\"43554378-0651-47bf-a8b8-658ad5843651\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.145540 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") device mount path \"/mnt/openstack/pv01\"" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.145870 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-generated\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.146374 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-kolla-config\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.146393 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-config-data-default\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.146684 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-kolla-config\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.146724 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-config-data-default\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.147061 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-config-data-default\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.147794 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.148118 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43554378-0651-47bf-a8b8-658ad5843651-operator-scripts\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc 
kubenswrapper[4702]: I1124 18:01:28.148380 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d49d1cbe-321e-4459-951f-f7efcc8ed02e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.149434 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea71d590-80f8-45c8-9db2-d163f5516941-operator-scripts\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.164529 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.165227 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.165249 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhp4x\" (UniqueName: \"kubernetes.io/projected/d49d1cbe-321e-4459-951f-f7efcc8ed02e-kube-api-access-jhp4x\") pod \"openstack-galera-0\" (UID: \"d49d1cbe-321e-4459-951f-f7efcc8ed02e\") " pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.165238 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6srp\" (UniqueName: \"kubernetes.io/projected/ea71d590-80f8-45c8-9db2-d163f5516941-kube-api-access-v6srp\") pod \"openstack-galera-2\" (UID: \"ea71d590-80f8-45c8-9db2-d163f5516941\") " pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.165543 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccvpb\" (UniqueName: \"kubernetes.io/projected/43554378-0651-47bf-a8b8-658ad5843651-kube-api-access-ccvpb\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.165692 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"43554378-0651-47bf-a8b8-658ad5843651\") " pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.218207 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.228708 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.236687 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.486188 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6b49b55b86-l7qhw" Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.622304 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-0"] Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.625721 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-2"] Nov 24 18:01:28 crc kubenswrapper[4702]: W1124 18:01:28.627623 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea71d590_80f8_45c8_9db2_d163f5516941.slice/crio-a6daa56fc3913b44c8c7e703b090598841089b20729504714eb09eae97cef3f5 WatchSource:0}: Error finding container a6daa56fc3913b44c8c7e703b090598841089b20729504714eb09eae97cef3f5: Status 404 returned error can't find the container with id a6daa56fc3913b44c8c7e703b090598841089b20729504714eb09eae97cef3f5 Nov 24 18:01:28 crc kubenswrapper[4702]: I1124 18:01:28.697651 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-1"] Nov 24 18:01:28 crc kubenswrapper[4702]: W1124 18:01:28.701074 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43554378_0651_47bf_a8b8_658ad5843651.slice/crio-022a5d8380aad3faca10179a21651229995ccf7638e17621fb922d956e777857 WatchSource:0}: Error finding container 022a5d8380aad3faca10179a21651229995ccf7638e17621fb922d956e777857: Status 404 returned error can't find the container with id 022a5d8380aad3faca10179a21651229995ccf7638e17621fb922d956e777857 Nov 24 18:01:29 crc kubenswrapper[4702]: I1124 18:01:29.140466 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"43554378-0651-47bf-a8b8-658ad5843651","Type":"ContainerStarted","Data":"022a5d8380aad3faca10179a21651229995ccf7638e17621fb922d956e777857"} Nov 24 18:01:29 crc kubenswrapper[4702]: I1124 18:01:29.141358 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"ea71d590-80f8-45c8-9db2-d163f5516941","Type":"ContainerStarted","Data":"a6daa56fc3913b44c8c7e703b090598841089b20729504714eb09eae97cef3f5"} Nov 24 18:01:29 crc kubenswrapper[4702]: I1124 18:01:29.142127 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"d49d1cbe-321e-4459-951f-f7efcc8ed02e","Type":"ContainerStarted","Data":"16d785f5cec2d63a92f5ebb232f1746bafe986128a57479a4e5ba0ce66cf8230"} Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.151042 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/memcached-0"] Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.152068 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.154002 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"memcached-memcached-dockercfg-5s4ng" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.154919 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"memcached-config-data" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.169838 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/memcached-0"] Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.272756 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97smv\" (UniqueName: \"kubernetes.io/projected/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kube-api-access-97smv\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.272855 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-config-data\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.272892 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kolla-config\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.374578 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kolla-config\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.374681 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97smv\" (UniqueName: \"kubernetes.io/projected/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kube-api-access-97smv\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.374752 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-config-data\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.375628 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kolla-config\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.375641 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-config-data\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 
crc kubenswrapper[4702]: I1124 18:01:30.394264 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97smv\" (UniqueName: \"kubernetes.io/projected/eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154-kube-api-access-97smv\") pod \"memcached-0\" (UID: \"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154\") " pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.471223 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:30 crc kubenswrapper[4702]: I1124 18:01:30.759927 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/memcached-0"] Nov 24 18:01:31 crc kubenswrapper[4702]: I1124 18:01:31.182878 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/memcached-0" event={"ID":"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154","Type":"ContainerStarted","Data":"c2a19c51ac8e4a7c1888c3be785c435573ee8a9bfd30d5a14e2844c0cff6bbd4"} Nov 24 18:01:32 crc kubenswrapper[4702]: I1124 18:01:32.844281 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-n79bv"] Nov 24 18:01:32 crc kubenswrapper[4702]: I1124 18:01:32.845342 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:32 crc kubenswrapper[4702]: I1124 18:01:32.849614 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-c2klk" Nov 24 18:01:32 crc kubenswrapper[4702]: I1124 18:01:32.857930 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-n79bv"] Nov 24 18:01:33 crc kubenswrapper[4702]: I1124 18:01:33.011352 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27dv6\" (UniqueName: \"kubernetes.io/projected/81d84f21-a3ee-4817-a631-03b27359f592-kube-api-access-27dv6\") pod \"rabbitmq-cluster-operator-index-n79bv\" (UID: \"81d84f21-a3ee-4817-a631-03b27359f592\") " pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:33 crc kubenswrapper[4702]: I1124 18:01:33.112526 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27dv6\" (UniqueName: \"kubernetes.io/projected/81d84f21-a3ee-4817-a631-03b27359f592-kube-api-access-27dv6\") pod \"rabbitmq-cluster-operator-index-n79bv\" (UID: \"81d84f21-a3ee-4817-a631-03b27359f592\") " pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:33 crc kubenswrapper[4702]: I1124 18:01:33.129871 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27dv6\" (UniqueName: \"kubernetes.io/projected/81d84f21-a3ee-4817-a631-03b27359f592-kube-api-access-27dv6\") pod \"rabbitmq-cluster-operator-index-n79bv\" (UID: \"81d84f21-a3ee-4817-a631-03b27359f592\") " pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:33 crc kubenswrapper[4702]: I1124 18:01:33.166292 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:38 crc kubenswrapper[4702]: I1124 18:01:38.535964 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-n79bv"] Nov 24 18:01:38 crc kubenswrapper[4702]: W1124 18:01:38.543943 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81d84f21_a3ee_4817_a631_03b27359f592.slice/crio-10a1495f0e5ef2529f1e14c3f5b321831ec24e7c71d5c01ae23b1c3cb829b0ea WatchSource:0}: Error finding container 10a1495f0e5ef2529f1e14c3f5b321831ec24e7c71d5c01ae23b1c3cb829b0ea: Status 404 returned error can't find the container with id 10a1495f0e5ef2529f1e14c3f5b321831ec24e7c71d5c01ae23b1c3cb829b0ea Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.229111 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"ea71d590-80f8-45c8-9db2-d163f5516941","Type":"ContainerStarted","Data":"6be3568a9c797e82ce73c9406746f7d29764527b39f9f45c87dc4f86e4eedd45"} Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.230662 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/memcached-0" event={"ID":"eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154","Type":"ContainerStarted","Data":"2ed28eb49962cda03ee2e41f50ecd596a8bb21786c0ec75bb6cfa30ed2b3fc0b"} Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.230834 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.234057 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"d49d1cbe-321e-4459-951f-f7efcc8ed02e","Type":"ContainerStarted","Data":"9ab650e5465e1079605de0948fa5aa28a923268f935dfe4310dad815669814a0"} Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.236327 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" event={"ID":"81d84f21-a3ee-4817-a631-03b27359f592","Type":"ContainerStarted","Data":"10a1495f0e5ef2529f1e14c3f5b321831ec24e7c71d5c01ae23b1c3cb829b0ea"} Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.237964 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"43554378-0651-47bf-a8b8-658ad5843651","Type":"ContainerStarted","Data":"ab9e7bdc601797a52a543bcdae7a2a936689dcc60e8b7eeeb330441db08028d2"} Nov 24 18:01:39 crc kubenswrapper[4702]: I1124 18:01:39.265287 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/memcached-0" podStartSLOduration=1.729846896 podStartE2EDuration="9.265265517s" podCreationTimestamp="2025-11-24 18:01:30 +0000 UTC" firstStartedPulling="2025-11-24 18:01:30.76966154 +0000 UTC m=+780.010402704" lastFinishedPulling="2025-11-24 18:01:38.305080161 +0000 UTC m=+787.545821325" observedRunningTime="2025-11-24 18:01:39.261583121 +0000 UTC m=+788.502324285" watchObservedRunningTime="2025-11-24 18:01:39.265265517 +0000 UTC m=+788.506006681" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.449178 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.450863 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.463069 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.525460 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckbzt\" (UniqueName: \"kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.525684 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.525792 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.627250 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckbzt\" (UniqueName: \"kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.627345 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.627384 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.627852 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.628070 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.647170 4702 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ckbzt\" (UniqueName: \"kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt\") pod \"community-operators-k7d7h\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:40 crc kubenswrapper[4702]: I1124 18:01:40.775171 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:41 crc kubenswrapper[4702]: I1124 18:01:41.039022 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:41 crc kubenswrapper[4702]: I1124 18:01:41.261315 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerStarted","Data":"3b115368e518e3403d1f9c96b74959e0dcf5cbb12ba444d9dfa0ac3556113424"} Nov 24 18:01:42 crc kubenswrapper[4702]: I1124 18:01:42.267983 4702 generic.go:334] "Generic (PLEG): container finished" podID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerID="2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310" exitCode=0 Nov 24 18:01:42 crc kubenswrapper[4702]: I1124 18:01:42.268040 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerDied","Data":"2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310"} Nov 24 18:01:43 crc kubenswrapper[4702]: I1124 18:01:43.274492 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" event={"ID":"81d84f21-a3ee-4817-a631-03b27359f592","Type":"ContainerStarted","Data":"d308a4e5ab9ed5fb5e4e72686bedb379768b598fcffabe823d9138c8cf16f1b2"} Nov 24 18:01:43 crc kubenswrapper[4702]: I1124 18:01:43.288858 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" podStartSLOduration=7.092456151 podStartE2EDuration="11.288842187s" podCreationTimestamp="2025-11-24 18:01:32 +0000 UTC" firstStartedPulling="2025-11-24 18:01:38.545971278 +0000 UTC m=+787.786712442" lastFinishedPulling="2025-11-24 18:01:42.742357314 +0000 UTC m=+791.983098478" observedRunningTime="2025-11-24 18:01:43.288257182 +0000 UTC m=+792.528998366" watchObservedRunningTime="2025-11-24 18:01:43.288842187 +0000 UTC m=+792.529583351" Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.287462 4702 generic.go:334] "Generic (PLEG): container finished" podID="d49d1cbe-321e-4459-951f-f7efcc8ed02e" containerID="9ab650e5465e1079605de0948fa5aa28a923268f935dfe4310dad815669814a0" exitCode=0 Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.287559 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"d49d1cbe-321e-4459-951f-f7efcc8ed02e","Type":"ContainerDied","Data":"9ab650e5465e1079605de0948fa5aa28a923268f935dfe4310dad815669814a0"} Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.293178 4702 generic.go:334] "Generic (PLEG): container finished" podID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerID="79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c" exitCode=0 Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.293274 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" 
event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerDied","Data":"79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c"} Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.296726 4702 generic.go:334] "Generic (PLEG): container finished" podID="ea71d590-80f8-45c8-9db2-d163f5516941" containerID="6be3568a9c797e82ce73c9406746f7d29764527b39f9f45c87dc4f86e4eedd45" exitCode=0 Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.296830 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"ea71d590-80f8-45c8-9db2-d163f5516941","Type":"ContainerDied","Data":"6be3568a9c797e82ce73c9406746f7d29764527b39f9f45c87dc4f86e4eedd45"} Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.299474 4702 generic.go:334] "Generic (PLEG): container finished" podID="43554378-0651-47bf-a8b8-658ad5843651" containerID="ab9e7bdc601797a52a543bcdae7a2a936689dcc60e8b7eeeb330441db08028d2" exitCode=0 Nov 24 18:01:44 crc kubenswrapper[4702]: I1124 18:01:44.299548 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"43554378-0651-47bf-a8b8-658ad5843651","Type":"ContainerDied","Data":"ab9e7bdc601797a52a543bcdae7a2a936689dcc60e8b7eeeb330441db08028d2"} Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.307734 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"43554378-0651-47bf-a8b8-658ad5843651","Type":"ContainerStarted","Data":"71081eabd95ec06188cb146a73b627389bb44574caa2b0f97027fa1cd1a5624e"} Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.310869 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"ea71d590-80f8-45c8-9db2-d163f5516941","Type":"ContainerStarted","Data":"2bef3fe487e63206e028ab27194a68d860cdba98c307063baf00f3448b1288d2"} Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.313068 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"d49d1cbe-321e-4459-951f-f7efcc8ed02e","Type":"ContainerStarted","Data":"5328d7f22d19d589035d597bd78bf69bf4da92fd4911ac496412653f4491eade"} Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.315776 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerStarted","Data":"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4"} Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.328883 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-1" podStartSLOduration=9.686159498 podStartE2EDuration="19.328861246s" podCreationTimestamp="2025-11-24 18:01:26 +0000 UTC" firstStartedPulling="2025-11-24 18:01:28.703133206 +0000 UTC m=+777.943874380" lastFinishedPulling="2025-11-24 18:01:38.345834974 +0000 UTC m=+787.586576128" observedRunningTime="2025-11-24 18:01:45.326503904 +0000 UTC m=+794.567245078" watchObservedRunningTime="2025-11-24 18:01:45.328861246 +0000 UTC m=+794.569602420" Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.346423 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-2" podStartSLOduration=9.675883448 podStartE2EDuration="19.346399517s" podCreationTimestamp="2025-11-24 18:01:26 +0000 UTC" firstStartedPulling="2025-11-24 18:01:28.629569572 +0000 UTC m=+777.870310736" 
lastFinishedPulling="2025-11-24 18:01:38.300085641 +0000 UTC m=+787.540826805" observedRunningTime="2025-11-24 18:01:45.341262742 +0000 UTC m=+794.582003926" watchObservedRunningTime="2025-11-24 18:01:45.346399517 +0000 UTC m=+794.587140701" Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.362501 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-0" podStartSLOduration=9.692000083 podStartE2EDuration="19.36247913s" podCreationTimestamp="2025-11-24 18:01:26 +0000 UTC" firstStartedPulling="2025-11-24 18:01:28.628203546 +0000 UTC m=+777.868944710" lastFinishedPulling="2025-11-24 18:01:38.298682593 +0000 UTC m=+787.539423757" observedRunningTime="2025-11-24 18:01:45.358637159 +0000 UTC m=+794.599378323" watchObservedRunningTime="2025-11-24 18:01:45.36247913 +0000 UTC m=+794.603220314" Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.376864 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k7d7h" podStartSLOduration=3.116626708 podStartE2EDuration="5.376834328s" podCreationTimestamp="2025-11-24 18:01:40 +0000 UTC" firstStartedPulling="2025-11-24 18:01:42.637510136 +0000 UTC m=+791.878251300" lastFinishedPulling="2025-11-24 18:01:44.897717756 +0000 UTC m=+794.138458920" observedRunningTime="2025-11-24 18:01:45.376391106 +0000 UTC m=+794.617132270" watchObservedRunningTime="2025-11-24 18:01:45.376834328 +0000 UTC m=+794.617575512" Nov 24 18:01:45 crc kubenswrapper[4702]: I1124 18:01:45.473087 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/memcached-0" Nov 24 18:01:48 crc kubenswrapper[4702]: E1124 18:01:48.133617 4702 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:47978->38.102.83.65:40425: write tcp 38.102.83.65:47978->38.102.83.65:40425: write: broken pipe Nov 24 18:01:48 crc kubenswrapper[4702]: E1124 18:01:48.189170 4702 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:47990->38.102.83.65:40425: write tcp 38.102.83.65:47990->38.102.83.65:40425: write: broken pipe Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.219109 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.219431 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.228948 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.228991 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.237712 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:48 crc kubenswrapper[4702]: I1124 18:01:48.237750 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:01:50 crc kubenswrapper[4702]: I1124 18:01:50.775755 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:50 crc kubenswrapper[4702]: I1124 18:01:50.776069 4702 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:50 crc kubenswrapper[4702]: I1124 18:01:50.812019 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:51 crc kubenswrapper[4702]: I1124 18:01:51.387987 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:52 crc kubenswrapper[4702]: E1124 18:01:52.128704 4702 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:48018->38.102.83.65:40425: write tcp 38.102.83.65:48018->38.102.83.65:40425: write: broken pipe Nov 24 18:01:53 crc kubenswrapper[4702]: I1124 18:01:53.166784 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:53 crc kubenswrapper[4702]: I1124 18:01:53.167200 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:53 crc kubenswrapper[4702]: I1124 18:01:53.191651 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:53 crc kubenswrapper[4702]: I1124 18:01:53.382377 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-n79bv" Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.301527 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.365022 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-2" Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.439061 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.439279 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k7d7h" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="registry-server" containerID="cri-o://78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4" gracePeriod=2 Nov 24 18:01:54 crc kubenswrapper[4702]: E1124 18:01:54.536462 4702 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:39632->38.102.83.65:40425: write tcp 38.102.83.65:39632->38.102.83.65:40425: write: broken pipe Nov 24 18:01:54 crc kubenswrapper[4702]: E1124 18:01:54.645307 4702 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.65:39644->38.102.83.65:40425: read tcp 38.102.83.65:39644->38.102.83.65:40425: read: connection reset by peer Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.789460 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.910696 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckbzt\" (UniqueName: \"kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt\") pod \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.911052 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities\") pod \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.911107 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content\") pod \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\" (UID: \"0f33339c-f3aa-4d68-9f19-4f89fdb350ed\") " Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.911650 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities" (OuterVolumeSpecName: "utilities") pod "0f33339c-f3aa-4d68-9f19-4f89fdb350ed" (UID: "0f33339c-f3aa-4d68-9f19-4f89fdb350ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:54 crc kubenswrapper[4702]: I1124 18:01:54.916915 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt" (OuterVolumeSpecName: "kube-api-access-ckbzt") pod "0f33339c-f3aa-4d68-9f19-4f89fdb350ed" (UID: "0f33339c-f3aa-4d68-9f19-4f89fdb350ed"). InnerVolumeSpecName "kube-api-access-ckbzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.012468 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckbzt\" (UniqueName: \"kubernetes.io/projected/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-kube-api-access-ckbzt\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.012507 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.371965 4702 generic.go:334] "Generic (PLEG): container finished" podID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerID="78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4" exitCode=0 Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.372026 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k7d7h" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.372030 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerDied","Data":"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4"} Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.372422 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k7d7h" event={"ID":"0f33339c-f3aa-4d68-9f19-4f89fdb350ed","Type":"ContainerDied","Data":"3b115368e518e3403d1f9c96b74959e0dcf5cbb12ba444d9dfa0ac3556113424"} Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.372443 4702 scope.go:117] "RemoveContainer" containerID="78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.387748 4702 scope.go:117] "RemoveContainer" containerID="79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.404130 4702 scope.go:117] "RemoveContainer" containerID="2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.428350 4702 scope.go:117] "RemoveContainer" containerID="78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4" Nov 24 18:01:55 crc kubenswrapper[4702]: E1124 18:01:55.429128 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4\": container with ID starting with 78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4 not found: ID does not exist" containerID="78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.429205 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4"} err="failed to get container status \"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4\": rpc error: code = NotFound desc = could not find container \"78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4\": container with ID starting with 78e7a1841a198eac30234fda8a211ac8180dd40c2acaca06d66635c9540d42b4 not found: ID does not exist" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.429234 4702 scope.go:117] "RemoveContainer" containerID="79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c" Nov 24 18:01:55 crc kubenswrapper[4702]: E1124 18:01:55.429491 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c\": container with ID starting with 79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c not found: ID does not exist" containerID="79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.429524 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c"} err="failed to get container status \"79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c\": rpc error: code = NotFound desc = could not find container 
\"79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c\": container with ID starting with 79ef044280b22645a085d5ed537e01b7499636698ef92d23e7546010fca6510c not found: ID does not exist" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.429541 4702 scope.go:117] "RemoveContainer" containerID="2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310" Nov 24 18:01:55 crc kubenswrapper[4702]: E1124 18:01:55.430001 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310\": container with ID starting with 2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310 not found: ID does not exist" containerID="2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.430067 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310"} err="failed to get container status \"2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310\": rpc error: code = NotFound desc = could not find container \"2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310\": container with ID starting with 2c822a8ff40ad64d1c02398ee03a92068d001cb5f7370cf097d7cc5ec6833310 not found: ID does not exist" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.622546 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0f33339c-f3aa-4d68-9f19-4f89fdb350ed" (UID: "0f33339c-f3aa-4d68-9f19-4f89fdb350ed"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.687755 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.691232 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k7d7h"] Nov 24 18:01:55 crc kubenswrapper[4702]: I1124 18:01:55.722001 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f33339c-f3aa-4d68-9f19-4f89fdb350ed-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:57 crc kubenswrapper[4702]: I1124 18:01:57.660187 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" path="/var/lib/kubelet/pods/0f33339c-f3aa-4d68-9f19-4f89fdb350ed/volumes" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.446136 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:03 crc kubenswrapper[4702]: E1124 18:02:03.446708 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="registry-server" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.446727 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="registry-server" Nov 24 18:02:03 crc kubenswrapper[4702]: E1124 18:02:03.446751 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="extract-content" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.446759 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="extract-content" Nov 24 18:02:03 crc kubenswrapper[4702]: E1124 18:02:03.446781 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="extract-utilities" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.446789 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="extract-utilities" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.446962 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f33339c-f3aa-4d68-9f19-4f89fdb350ed" containerName="registry-server" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.447953 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.456234 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.519899 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.519997 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.520054 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p66tf\" (UniqueName: \"kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.620964 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.621050 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.621075 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p66tf\" (UniqueName: \"kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.621505 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.621584 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.643374 4702 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p66tf\" (UniqueName: \"kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf\") pod \"certified-operators-vt4pr\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:03 crc kubenswrapper[4702]: I1124 18:02:03.777835 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:04 crc kubenswrapper[4702]: I1124 18:02:04.214611 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:04 crc kubenswrapper[4702]: W1124 18:02:04.220017 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34fba9c5_ea99_4bd5_83bf_0de03a245baa.slice/crio-d91354095c6086af15c4d8f30f99aac51294d2713b5d178034c99090d669594c WatchSource:0}: Error finding container d91354095c6086af15c4d8f30f99aac51294d2713b5d178034c99090d669594c: Status 404 returned error can't find the container with id d91354095c6086af15c4d8f30f99aac51294d2713b5d178034c99090d669594c Nov 24 18:02:04 crc kubenswrapper[4702]: I1124 18:02:04.428849 4702 generic.go:334] "Generic (PLEG): container finished" podID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerID="6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab" exitCode=0 Nov 24 18:02:04 crc kubenswrapper[4702]: I1124 18:02:04.428890 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerDied","Data":"6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab"} Nov 24 18:02:04 crc kubenswrapper[4702]: I1124 18:02:04.428912 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerStarted","Data":"d91354095c6086af15c4d8f30f99aac51294d2713b5d178034c99090d669594c"} Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.501560 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8"] Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.503113 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.505652 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.513893 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8"] Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.649065 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.649128 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.649229 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vzvp\" (UniqueName: \"kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.750230 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vzvp\" (UniqueName: \"kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.750375 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.750421 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.750914 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.751072 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.771399 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vzvp\" (UniqueName: \"kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:05 crc kubenswrapper[4702]: I1124 18:02:05.831745 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:06 crc kubenswrapper[4702]: I1124 18:02:06.230515 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8"] Nov 24 18:02:06 crc kubenswrapper[4702]: I1124 18:02:06.442636 4702 generic.go:334] "Generic (PLEG): container finished" podID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerID="4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d" exitCode=0 Nov 24 18:02:06 crc kubenswrapper[4702]: I1124 18:02:06.442700 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerDied","Data":"4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d"} Nov 24 18:02:06 crc kubenswrapper[4702]: I1124 18:02:06.444994 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerStarted","Data":"71288fc64481e93c396c07d063795b13e9927d1e2d73969c0919859eebaf2d78"} Nov 24 18:02:06 crc kubenswrapper[4702]: I1124 18:02:06.445021 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerStarted","Data":"4e7902957a0686f6654ea94b8b47e31dba5b90ebd26e061b0eedce8077bdd2ed"} Nov 24 18:02:07 crc kubenswrapper[4702]: I1124 18:02:07.454343 4702 generic.go:334] "Generic (PLEG): container finished" podID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerID="71288fc64481e93c396c07d063795b13e9927d1e2d73969c0919859eebaf2d78" exitCode=0 Nov 24 18:02:07 crc kubenswrapper[4702]: I1124 18:02:07.454484 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerDied","Data":"71288fc64481e93c396c07d063795b13e9927d1e2d73969c0919859eebaf2d78"} Nov 
24 18:02:07 crc kubenswrapper[4702]: I1124 18:02:07.459748 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerStarted","Data":"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70"} Nov 24 18:02:07 crc kubenswrapper[4702]: I1124 18:02:07.492573 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vt4pr" podStartSLOduration=2.094051461 podStartE2EDuration="4.49255274s" podCreationTimestamp="2025-11-24 18:02:03 +0000 UTC" firstStartedPulling="2025-11-24 18:02:04.431551404 +0000 UTC m=+813.672292568" lastFinishedPulling="2025-11-24 18:02:06.830052693 +0000 UTC m=+816.070793847" observedRunningTime="2025-11-24 18:02:07.49110552 +0000 UTC m=+816.731846704" watchObservedRunningTime="2025-11-24 18:02:07.49255274 +0000 UTC m=+816.733293904" Nov 24 18:02:08 crc kubenswrapper[4702]: I1124 18:02:08.727271 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:02:08 crc kubenswrapper[4702]: I1124 18:02:08.785171 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-1" Nov 24 18:02:09 crc kubenswrapper[4702]: I1124 18:02:09.472041 4702 generic.go:334] "Generic (PLEG): container finished" podID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerID="7724e8e11e059a7feae8901a91fd660997c19b9344588fc7fb9756fd3f3d80e3" exitCode=0 Nov 24 18:02:09 crc kubenswrapper[4702]: I1124 18:02:09.472103 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerDied","Data":"7724e8e11e059a7feae8901a91fd660997c19b9344588fc7fb9756fd3f3d80e3"} Nov 24 18:02:09 crc kubenswrapper[4702]: I1124 18:02:09.730032 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:02:09 crc kubenswrapper[4702]: I1124 18:02:09.799242 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-0" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.481323 4702 generic.go:334] "Generic (PLEG): container finished" podID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerID="05d641ea2d41d80bbac12235a91233bf39c8c5f8b4e760d0e52892d8b5e9407c" exitCode=0 Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.481389 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerDied","Data":"05d641ea2d41d80bbac12235a91233bf39c8c5f8b4e760d0e52892d8b5e9407c"} Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.655459 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.657815 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.681476 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.816247 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vl99\" (UniqueName: \"kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.816583 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.816687 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.917944 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.918012 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.918073 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vl99\" (UniqueName: \"kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.918550 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.918819 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.939139 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4vl99\" (UniqueName: \"kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99\") pod \"redhat-operators-bp6qf\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:10 crc kubenswrapper[4702]: I1124 18:02:10.993702 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.200621 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:11 crc kubenswrapper[4702]: W1124 18:02:11.212745 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24937c87_8ce1_486c_bfc9_0dcd287a31a9.slice/crio-39204b6116c2845a85af73215486327031c73b34920c1a9713ac188e4bb31296 WatchSource:0}: Error finding container 39204b6116c2845a85af73215486327031c73b34920c1a9713ac188e4bb31296: Status 404 returned error can't find the container with id 39204b6116c2845a85af73215486327031c73b34920c1a9713ac188e4bb31296 Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.487761 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerStarted","Data":"39204b6116c2845a85af73215486327031c73b34920c1a9713ac188e4bb31296"} Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.707575 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.832025 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle\") pod \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.832139 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vzvp\" (UniqueName: \"kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp\") pod \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.832269 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util\") pod \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\" (UID: \"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e\") " Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.832955 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle" (OuterVolumeSpecName: "bundle") pod "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" (UID: "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.838320 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp" (OuterVolumeSpecName: "kube-api-access-7vzvp") pod "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" (UID: "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e"). 
InnerVolumeSpecName "kube-api-access-7vzvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.918399 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util" (OuterVolumeSpecName: "util") pod "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" (UID: "c71d5f19-b4ff-4070-b5a7-71520ffa8b6e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.933746 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.933770 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:11 crc kubenswrapper[4702]: I1124 18:02:11.933779 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vzvp\" (UniqueName: \"kubernetes.io/projected/c71d5f19-b4ff-4070-b5a7-71520ffa8b6e-kube-api-access-7vzvp\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.445437 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:12 crc kubenswrapper[4702]: E1124 18:02:12.445896 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="pull" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.445963 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="pull" Nov 24 18:02:12 crc kubenswrapper[4702]: E1124 18:02:12.446022 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="extract" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.446072 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="extract" Nov 24 18:02:12 crc kubenswrapper[4702]: E1124 18:02:12.446136 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="util" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.446185 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="util" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.446368 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="c71d5f19-b4ff-4070-b5a7-71520ffa8b6e" containerName="extract" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.447317 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.454917 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.499413 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" event={"ID":"c71d5f19-b4ff-4070-b5a7-71520ffa8b6e","Type":"ContainerDied","Data":"4e7902957a0686f6654ea94b8b47e31dba5b90ebd26e061b0eedce8077bdd2ed"} Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.499461 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e7902957a0686f6654ea94b8b47e31dba5b90ebd26e061b0eedce8077bdd2ed" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.499834 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.502264 4702 generic.go:334] "Generic (PLEG): container finished" podID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerID="ec9d7acc01e05fa6053bcd4e2951d424b226986cfa0e076172fcf16a9282e2cb" exitCode=0 Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.502308 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerDied","Data":"ec9d7acc01e05fa6053bcd4e2951d424b226986cfa0e076172fcf16a9282e2cb"} Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.541460 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7x4d\" (UniqueName: \"kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.541852 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.541881 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.643017 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.643066 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities\") pod \"redhat-marketplace-8qdwj\" 
(UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.643136 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7x4d\" (UniqueName: \"kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.644253 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.644316 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.663985 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7x4d\" (UniqueName: \"kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d\") pod \"redhat-marketplace-8qdwj\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.776705 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:12 crc kubenswrapper[4702]: I1124 18:02:12.977722 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.509986 4702 generic.go:334] "Generic (PLEG): container finished" podID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerID="8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122" exitCode=0 Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.510037 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerDied","Data":"8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122"} Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.510075 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerStarted","Data":"77edc54c5fb72c35206ee596a145bfa5ee24c3ccd6dda97c6bbed2956a824e8e"} Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.778739 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.779074 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:13 crc kubenswrapper[4702]: I1124 18:02:13.815385 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:14 crc kubenswrapper[4702]: I1124 18:02:14.516770 4702 generic.go:334] "Generic (PLEG): container finished" podID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerID="586dc48e7df04272e16b979292d538a314d2f767f85664ec036c7a19d415ccf3" exitCode=0 Nov 24 18:02:14 crc kubenswrapper[4702]: I1124 18:02:14.516853 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerDied","Data":"586dc48e7df04272e16b979292d538a314d2f767f85664ec036c7a19d415ccf3"} Nov 24 18:02:14 crc kubenswrapper[4702]: I1124 18:02:14.565757 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:15 crc kubenswrapper[4702]: I1124 18:02:15.527180 4702 generic.go:334] "Generic (PLEG): container finished" podID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerID="e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6" exitCode=0 Nov 24 18:02:15 crc kubenswrapper[4702]: I1124 18:02:15.527223 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerDied","Data":"e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6"} Nov 24 18:02:15 crc kubenswrapper[4702]: I1124 18:02:15.529408 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerStarted","Data":"f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4"} Nov 24 18:02:15 crc kubenswrapper[4702]: I1124 18:02:15.563532 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-bp6qf" podStartSLOduration=3.158697011 podStartE2EDuration="5.563504905s" podCreationTimestamp="2025-11-24 18:02:10 +0000 UTC" firstStartedPulling="2025-11-24 18:02:12.505489532 +0000 UTC m=+821.746230706" lastFinishedPulling="2025-11-24 18:02:14.910297436 +0000 UTC m=+824.151038600" observedRunningTime="2025-11-24 18:02:15.562090136 +0000 UTC m=+824.802831300" watchObservedRunningTime="2025-11-24 18:02:15.563504905 +0000 UTC m=+824.804246119" Nov 24 18:02:16 crc kubenswrapper[4702]: I1124 18:02:16.539542 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerStarted","Data":"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333"} Nov 24 18:02:16 crc kubenswrapper[4702]: I1124 18:02:16.558482 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8qdwj" podStartSLOduration=2.175594868 podStartE2EDuration="4.558464304s" podCreationTimestamp="2025-11-24 18:02:12 +0000 UTC" firstStartedPulling="2025-11-24 18:02:13.529450984 +0000 UTC m=+822.770192148" lastFinishedPulling="2025-11-24 18:02:15.91232042 +0000 UTC m=+825.153061584" observedRunningTime="2025-11-24 18:02:16.554942016 +0000 UTC m=+825.795683180" watchObservedRunningTime="2025-11-24 18:02:16.558464304 +0000 UTC m=+825.799205468" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.038737 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.039303 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vt4pr" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="registry-server" containerID="cri-o://806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70" gracePeriod=2 Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.416942 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.546948 4702 generic.go:334] "Generic (PLEG): container finished" podID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerID="806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70" exitCode=0 Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.547037 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerDied","Data":"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70"} Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.547050 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vt4pr" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.547075 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vt4pr" event={"ID":"34fba9c5-ea99-4bd5-83bf-0de03a245baa","Type":"ContainerDied","Data":"d91354095c6086af15c4d8f30f99aac51294d2713b5d178034c99090d669594c"} Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.547094 4702 scope.go:117] "RemoveContainer" containerID="806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.567546 4702 scope.go:117] "RemoveContainer" containerID="4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.582399 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities\") pod \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.582789 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content\") pod \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.582893 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p66tf\" (UniqueName: \"kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf\") pod \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\" (UID: \"34fba9c5-ea99-4bd5-83bf-0de03a245baa\") " Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.583155 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities" (OuterVolumeSpecName: "utilities") pod "34fba9c5-ea99-4bd5-83bf-0de03a245baa" (UID: "34fba9c5-ea99-4bd5-83bf-0de03a245baa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.583341 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.594478 4702 scope.go:117] "RemoveContainer" containerID="6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.596485 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf" (OuterVolumeSpecName: "kube-api-access-p66tf") pod "34fba9c5-ea99-4bd5-83bf-0de03a245baa" (UID: "34fba9c5-ea99-4bd5-83bf-0de03a245baa"). InnerVolumeSpecName "kube-api-access-p66tf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.627711 4702 scope.go:117] "RemoveContainer" containerID="806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70" Nov 24 18:02:17 crc kubenswrapper[4702]: E1124 18:02:17.628146 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70\": container with ID starting with 806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70 not found: ID does not exist" containerID="806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.628183 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70"} err="failed to get container status \"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70\": rpc error: code = NotFound desc = could not find container \"806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70\": container with ID starting with 806ee07707c17e03b4733e6a61d3024bdb108043678eb33964a4adc88bb0bf70 not found: ID does not exist" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.628218 4702 scope.go:117] "RemoveContainer" containerID="4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d" Nov 24 18:02:17 crc kubenswrapper[4702]: E1124 18:02:17.630998 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d\": container with ID starting with 4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d not found: ID does not exist" containerID="4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.631042 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d"} err="failed to get container status \"4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d\": rpc error: code = NotFound desc = could not find container \"4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d\": container with ID starting with 4781ffcb761a2b4e265934adaa5735dda733e662eaf3bcc9d7c5f4c63051603d not found: ID does not exist" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.631072 4702 scope.go:117] "RemoveContainer" containerID="6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.631344 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "34fba9c5-ea99-4bd5-83bf-0de03a245baa" (UID: "34fba9c5-ea99-4bd5-83bf-0de03a245baa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:17 crc kubenswrapper[4702]: E1124 18:02:17.631576 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab\": container with ID starting with 6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab not found: ID does not exist" containerID="6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.631595 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab"} err="failed to get container status \"6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab\": rpc error: code = NotFound desc = could not find container \"6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab\": container with ID starting with 6907e3a5739450defc8a58da7a1ae996461ef5c9bfdd7da169da56e6c56da7ab not found: ID does not exist" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.684091 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fba9c5-ea99-4bd5-83bf-0de03a245baa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.684123 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p66tf\" (UniqueName: \"kubernetes.io/projected/34fba9c5-ea99-4bd5-83bf-0de03a245baa-kube-api-access-p66tf\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.864387 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:17 crc kubenswrapper[4702]: I1124 18:02:17.872892 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vt4pr"] Nov 24 18:02:19 crc kubenswrapper[4702]: I1124 18:02:19.655337 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" path="/var/lib/kubelet/pods/34fba9c5-ea99-4bd5-83bf-0de03a245baa/volumes" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.896767 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf"] Nov 24 18:02:20 crc kubenswrapper[4702]: E1124 18:02:20.897479 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="registry-server" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.897495 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="registry-server" Nov 24 18:02:20 crc kubenswrapper[4702]: E1124 18:02:20.897525 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="extract-utilities" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.897532 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="extract-utilities" Nov 24 18:02:20 crc kubenswrapper[4702]: E1124 18:02:20.897547 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="extract-content" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.897555 4702 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="extract-content" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.897677 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="34fba9c5-ea99-4bd5-83bf-0de03a245baa" containerName="registry-server" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.898245 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.903602 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-t8jsz" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.905672 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf"] Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.923488 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w256s\" (UniqueName: \"kubernetes.io/projected/2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6-kube-api-access-w256s\") pod \"rabbitmq-cluster-operator-779fc9694b-8fmcf\" (UID: \"2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.993984 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:20 crc kubenswrapper[4702]: I1124 18:02:20.994038 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.024875 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w256s\" (UniqueName: \"kubernetes.io/projected/2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6-kube-api-access-w256s\") pod \"rabbitmq-cluster-operator-779fc9694b-8fmcf\" (UID: \"2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.036368 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.045172 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w256s\" (UniqueName: \"kubernetes.io/projected/2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6-kube-api-access-w256s\") pod \"rabbitmq-cluster-operator-779fc9694b-8fmcf\" (UID: \"2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.215219 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.386202 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf"] Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.572450 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" event={"ID":"2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6","Type":"ContainerStarted","Data":"ea231e639582dd251a2957f65457082599aba707f1aa977ab42d8f0ae15a46e0"} Nov 24 18:02:21 crc kubenswrapper[4702]: I1124 18:02:21.614206 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:22 crc kubenswrapper[4702]: I1124 18:02:22.777455 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:22 crc kubenswrapper[4702]: I1124 18:02:22.778334 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:22 crc kubenswrapper[4702]: I1124 18:02:22.819790 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:23 crc kubenswrapper[4702]: I1124 18:02:23.621059 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:25 crc kubenswrapper[4702]: I1124 18:02:25.604443 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" event={"ID":"2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6","Type":"ContainerStarted","Data":"add33dd7de4651f9d38e3788195755da079f9c2d4d7500faf8c6204fa006172c"} Nov 24 18:02:25 crc kubenswrapper[4702]: I1124 18:02:25.619044 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-8fmcf" podStartSLOduration=2.566295701 podStartE2EDuration="5.619022428s" podCreationTimestamp="2025-11-24 18:02:20 +0000 UTC" firstStartedPulling="2025-11-24 18:02:21.393057993 +0000 UTC m=+830.633799157" lastFinishedPulling="2025-11-24 18:02:24.44578472 +0000 UTC m=+833.686525884" observedRunningTime="2025-11-24 18:02:25.618271558 +0000 UTC m=+834.859012742" watchObservedRunningTime="2025-11-24 18:02:25.619022428 +0000 UTC m=+834.859763592" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.576239 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.577545 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.579115 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-default-user" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.579555 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"rabbitmq-plugins-conf" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.579641 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-erlang-cookie" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.580240 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"rabbitmq-server-conf" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.580249 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-server-dockercfg-gwsg2" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.586102 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.719960 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ecd2bb28-4395-494d-944a-7f25b22e1561-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720462 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720523 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ecd2bb28-4395-494d-944a-7f25b22e1561-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720548 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2wrc\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-kube-api-access-z2wrc\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720622 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ecd2bb28-4395-494d-944a-7f25b22e1561-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720707 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 
18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720788 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.720850 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822546 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ecd2bb28-4395-494d-944a-7f25b22e1561-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822607 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822642 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822673 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822759 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ecd2bb28-4395-494d-944a-7f25b22e1561-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822773 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822789 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ecd2bb28-4395-494d-944a-7f25b22e1561-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.822827 
4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2wrc\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-kube-api-access-z2wrc\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.823735 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.823889 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.824413 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ecd2bb28-4395-494d-944a-7f25b22e1561-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.828639 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ecd2bb28-4395-494d-944a-7f25b22e1561-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.829726 4702 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.829767 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ee51371a239af244313a8edb585bb0aab990050007941354cdc72a5b07034f8f/globalmount\"" pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.830139 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.843137 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ecd2bb28-4395-494d-944a-7f25b22e1561-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.845322 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2wrc\" (UniqueName: \"kubernetes.io/projected/ecd2bb28-4395-494d-944a-7f25b22e1561-kube-api-access-z2wrc\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.865231 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ea3442b-911a-4861-9fcc-acfa48a9e683\") pod \"rabbitmq-server-0\" (UID: \"ecd2bb28-4395-494d-944a-7f25b22e1561\") " pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:27 crc kubenswrapper[4702]: I1124 18:02:27.896055 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:02:28 crc kubenswrapper[4702]: I1124 18:02:28.295443 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Nov 24 18:02:28 crc kubenswrapper[4702]: W1124 18:02:28.303587 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podecd2bb28_4395_494d_944a_7f25b22e1561.slice/crio-c6459454f457cacb63ecf511943a20252b572aa14a45997bba19dc8c80f592cf WatchSource:0}: Error finding container c6459454f457cacb63ecf511943a20252b572aa14a45997bba19dc8c80f592cf: Status 404 returned error can't find the container with id c6459454f457cacb63ecf511943a20252b572aa14a45997bba19dc8c80f592cf Nov 24 18:02:28 crc kubenswrapper[4702]: I1124 18:02:28.629840 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"ecd2bb28-4395-494d-944a-7f25b22e1561","Type":"ContainerStarted","Data":"c6459454f457cacb63ecf511943a20252b572aa14a45997bba19dc8c80f592cf"} Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.049173 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.049534 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8qdwj" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="registry-server" containerID="cri-o://927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333" gracePeriod=2 Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.463494 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.640022 4702 generic.go:334] "Generic (PLEG): container finished" podID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerID="927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333" exitCode=0 Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.640077 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerDied","Data":"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333"} Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.640092 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qdwj" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.640116 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qdwj" event={"ID":"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38","Type":"ContainerDied","Data":"77edc54c5fb72c35206ee596a145bfa5ee24c3ccd6dda97c6bbed2956a824e8e"} Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.640135 4702 scope.go:117] "RemoveContainer" containerID="927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.649279 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities\") pod \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.650437 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities" (OuterVolumeSpecName: "utilities") pod "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" (UID: "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.651207 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content\") pod \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.651252 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7x4d\" (UniqueName: \"kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d\") pod \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\" (UID: \"fe255cb7-ae4c-4bfe-b71e-6ae78e570d38\") " Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.651623 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.657456 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d" (OuterVolumeSpecName: "kube-api-access-f7x4d") pod "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" (UID: "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38"). InnerVolumeSpecName "kube-api-access-f7x4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.662944 4702 scope.go:117] "RemoveContainer" containerID="e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.673458 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" (UID: "fe255cb7-ae4c-4bfe-b71e-6ae78e570d38"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.689864 4702 scope.go:117] "RemoveContainer" containerID="8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.713307 4702 scope.go:117] "RemoveContainer" containerID="927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333" Nov 24 18:02:29 crc kubenswrapper[4702]: E1124 18:02:29.713689 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333\": container with ID starting with 927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333 not found: ID does not exist" containerID="927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.713718 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333"} err="failed to get container status \"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333\": rpc error: code = NotFound desc = could not find container \"927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333\": container with ID starting with 927aa47d700301a4d1dc3eaf6e1a95389436e247375e307b83c49fb2e8b5e333 not found: ID does not exist" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.713739 4702 scope.go:117] "RemoveContainer" containerID="e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6" Nov 24 18:02:29 crc kubenswrapper[4702]: E1124 18:02:29.714136 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6\": container with ID starting with e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6 not found: ID does not exist" containerID="e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.714180 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6"} err="failed to get container status \"e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6\": rpc error: code = NotFound desc = could not find container \"e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6\": container with ID starting with e9946a80e2c7a838b082cf79a92f31dfbc5c1c7a9cb8fca2dfd42fa98583ffa6 not found: ID does not exist" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.714222 4702 scope.go:117] "RemoveContainer" containerID="8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122" Nov 24 18:02:29 crc kubenswrapper[4702]: E1124 18:02:29.714745 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122\": container with ID starting with 8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122 not found: ID does not exist" containerID="8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.714764 4702 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122"} err="failed to get container status \"8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122\": rpc error: code = NotFound desc = could not find container \"8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122\": container with ID starting with 8d643a5c38eb791e4c84c5cb500f682fbb1b02db35b9a5fe74463d53e854c122 not found: ID does not exist" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.753492 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.753553 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7x4d\" (UniqueName: \"kubernetes.io/projected/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38-kube-api-access-f7x4d\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.977396 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:29 crc kubenswrapper[4702]: I1124 18:02:29.981159 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qdwj"] Nov 24 18:02:30 crc kubenswrapper[4702]: I1124 18:02:30.050671 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:30 crc kubenswrapper[4702]: I1124 18:02:30.050966 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bp6qf" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="registry-server" containerID="cri-o://f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" gracePeriod=2 Nov 24 18:02:30 crc kubenswrapper[4702]: I1124 18:02:30.651768 4702 generic.go:334] "Generic (PLEG): container finished" podID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerID="f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" exitCode=0 Nov 24 18:02:30 crc kubenswrapper[4702]: I1124 18:02:30.651833 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerDied","Data":"f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4"} Nov 24 18:02:30 crc kubenswrapper[4702]: E1124 18:02:30.994867 4702 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4 is running failed: container process not found" containerID="f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 18:02:30 crc kubenswrapper[4702]: E1124 18:02:30.995305 4702 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4 is running failed: container process not found" containerID="f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 18:02:30 crc kubenswrapper[4702]: E1124 18:02:30.995616 4702 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not 
created or running: checking if PID of f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4 is running failed: container process not found" containerID="f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 18:02:30 crc kubenswrapper[4702]: E1124 18:02:30.995676 4702 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-bp6qf" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="registry-server" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.003791 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.193106 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities\") pod \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.193192 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vl99\" (UniqueName: \"kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99\") pod \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.193284 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content\") pod \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\" (UID: \"24937c87-8ce1-486c-bfc9-0dcd287a31a9\") " Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.194294 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities" (OuterVolumeSpecName: "utilities") pod "24937c87-8ce1-486c-bfc9-0dcd287a31a9" (UID: "24937c87-8ce1-486c-bfc9-0dcd287a31a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.197788 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99" (OuterVolumeSpecName: "kube-api-access-4vl99") pod "24937c87-8ce1-486c-bfc9-0dcd287a31a9" (UID: "24937c87-8ce1-486c-bfc9-0dcd287a31a9"). InnerVolumeSpecName "kube-api-access-4vl99". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.280553 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24937c87-8ce1-486c-bfc9-0dcd287a31a9" (UID: "24937c87-8ce1-486c-bfc9-0dcd287a31a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.294962 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vl99\" (UniqueName: \"kubernetes.io/projected/24937c87-8ce1-486c-bfc9-0dcd287a31a9-kube-api-access-4vl99\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.295004 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.295017 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24937c87-8ce1-486c-bfc9-0dcd287a31a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.655305 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" path="/var/lib/kubelet/pods/fe255cb7-ae4c-4bfe-b71e-6ae78e570d38/volumes" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.660900 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bp6qf" event={"ID":"24937c87-8ce1-486c-bfc9-0dcd287a31a9","Type":"ContainerDied","Data":"39204b6116c2845a85af73215486327031c73b34920c1a9713ac188e4bb31296"} Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.660956 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bp6qf" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.660977 4702 scope.go:117] "RemoveContainer" containerID="f7aa743b973cb612b062e06962f913cf391fef0b421bde36f69d743f755e8ea4" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.682622 4702 scope.go:117] "RemoveContainer" containerID="586dc48e7df04272e16b979292d538a314d2f767f85664ec036c7a19d415ccf3" Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.693589 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.698936 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bp6qf"] Nov 24 18:02:31 crc kubenswrapper[4702]: I1124 18:02:31.701206 4702 scope.go:117] "RemoveContainer" containerID="ec9d7acc01e05fa6053bcd4e2951d424b226986cfa0e076172fcf16a9282e2cb" Nov 24 18:02:33 crc kubenswrapper[4702]: I1124 18:02:33.657789 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" path="/var/lib/kubelet/pods/24937c87-8ce1-486c-bfc9-0dcd287a31a9/volumes" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.050755 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-xdjp4"] Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051293 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="registry-server" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051312 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="registry-server" Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051332 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="registry-server" Nov 24 18:02:34 crc 
kubenswrapper[4702]: I1124 18:02:34.051340 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="registry-server" Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051359 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="extract-content" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051369 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="extract-content" Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051383 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="extract-utilities" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051391 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="extract-utilities" Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051406 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="extract-utilities" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051414 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="extract-utilities" Nov 24 18:02:34 crc kubenswrapper[4702]: E1124 18:02:34.051430 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="extract-content" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051438 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="extract-content" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051591 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe255cb7-ae4c-4bfe-b71e-6ae78e570d38" containerName="registry-server" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.051607 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="24937c87-8ce1-486c-bfc9-0dcd287a31a9" containerName="registry-server" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.052251 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.056289 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-dnznd" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.057668 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-xdjp4"] Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.132062 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw8wd\" (UniqueName: \"kubernetes.io/projected/bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1-kube-api-access-pw8wd\") pod \"keystone-operator-index-xdjp4\" (UID: \"bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1\") " pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.233326 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw8wd\" (UniqueName: \"kubernetes.io/projected/bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1-kube-api-access-pw8wd\") pod \"keystone-operator-index-xdjp4\" (UID: \"bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1\") " pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.253156 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw8wd\" (UniqueName: \"kubernetes.io/projected/bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1-kube-api-access-pw8wd\") pod \"keystone-operator-index-xdjp4\" (UID: \"bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1\") " pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:34 crc kubenswrapper[4702]: I1124 18:02:34.378480 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:35 crc kubenswrapper[4702]: I1124 18:02:35.503620 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-xdjp4"] Nov 24 18:02:35 crc kubenswrapper[4702]: W1124 18:02:35.509340 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd669ceb_92b4_4d8a_b2f0_94ee4bdc5df1.slice/crio-9a4b474a3f2d5d32d44ded8ad95b2086dbba52f6ff220bca07c7498f87e82ae8 WatchSource:0}: Error finding container 9a4b474a3f2d5d32d44ded8ad95b2086dbba52f6ff220bca07c7498f87e82ae8: Status 404 returned error can't find the container with id 9a4b474a3f2d5d32d44ded8ad95b2086dbba52f6ff220bca07c7498f87e82ae8 Nov 24 18:02:35 crc kubenswrapper[4702]: I1124 18:02:35.688696 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-xdjp4" event={"ID":"bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1","Type":"ContainerStarted","Data":"9a4b474a3f2d5d32d44ded8ad95b2086dbba52f6ff220bca07c7498f87e82ae8"} Nov 24 18:02:36 crc kubenswrapper[4702]: I1124 18:02:36.696032 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"ecd2bb28-4395-494d-944a-7f25b22e1561","Type":"ContainerStarted","Data":"53b91aea5de7793217bbca979aaf8884cce2658fe90704ccd0febdc2d0a2f49e"} Nov 24 18:02:37 crc kubenswrapper[4702]: I1124 18:02:37.702691 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-xdjp4" event={"ID":"bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1","Type":"ContainerStarted","Data":"eddbabd2242b74ffc12647643061ce80fbea3313bc5515aeaa71f2dae8021208"} Nov 24 18:02:37 crc kubenswrapper[4702]: I1124 18:02:37.716616 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-xdjp4" podStartSLOduration=1.7317760340000001 podStartE2EDuration="3.716583s" podCreationTimestamp="2025-11-24 18:02:34 +0000 UTC" firstStartedPulling="2025-11-24 18:02:35.511417647 +0000 UTC m=+844.752158811" lastFinishedPulling="2025-11-24 18:02:37.496224613 +0000 UTC m=+846.736965777" observedRunningTime="2025-11-24 18:02:37.715599884 +0000 UTC m=+846.956341048" watchObservedRunningTime="2025-11-24 18:02:37.716583 +0000 UTC m=+846.957324164" Nov 24 18:02:44 crc kubenswrapper[4702]: I1124 18:02:44.378626 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:44 crc kubenswrapper[4702]: I1124 18:02:44.379943 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:44 crc kubenswrapper[4702]: I1124 18:02:44.405417 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:44 crc kubenswrapper[4702]: I1124 18:02:44.767831 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-xdjp4" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.284704 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm"] Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.286181 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.288415 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.297138 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm"] Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.432817 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.432932 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4cb2\" (UniqueName: \"kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.432963 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.534015 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4cb2\" (UniqueName: \"kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.534079 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.534227 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.534689 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.534711 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.553558 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4cb2\" (UniqueName: \"kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2\") pod \"62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.604282 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:49 crc kubenswrapper[4702]: I1124 18:02:49.784764 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm"] Nov 24 18:02:49 crc kubenswrapper[4702]: W1124 18:02:49.787431 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4880862_6c76_47c4_9046_cf95cf711d7e.slice/crio-00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6 WatchSource:0}: Error finding container 00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6: Status 404 returned error can't find the container with id 00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6 Nov 24 18:02:50 crc kubenswrapper[4702]: I1124 18:02:50.776975 4702 generic.go:334] "Generic (PLEG): container finished" podID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerID="e1b5d3f5470899af64e591f680ec5be825e95666caa80b6f9175bde6e6c437be" exitCode=0 Nov 24 18:02:50 crc kubenswrapper[4702]: I1124 18:02:50.777023 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" event={"ID":"a4880862-6c76-47c4-9046-cf95cf711d7e","Type":"ContainerDied","Data":"e1b5d3f5470899af64e591f680ec5be825e95666caa80b6f9175bde6e6c437be"} Nov 24 18:02:50 crc kubenswrapper[4702]: I1124 18:02:50.777304 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" event={"ID":"a4880862-6c76-47c4-9046-cf95cf711d7e","Type":"ContainerStarted","Data":"00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6"} Nov 24 18:02:52 crc kubenswrapper[4702]: I1124 18:02:52.790657 4702 generic.go:334] "Generic (PLEG): container finished" podID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerID="47ef32ff04a852098062280bfa252744a4de5ee0422f74cc808a763753cd3eb2" exitCode=0 Nov 24 18:02:52 crc kubenswrapper[4702]: I1124 18:02:52.790722 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" event={"ID":"a4880862-6c76-47c4-9046-cf95cf711d7e","Type":"ContainerDied","Data":"47ef32ff04a852098062280bfa252744a4de5ee0422f74cc808a763753cd3eb2"} Nov 24 18:02:53 crc kubenswrapper[4702]: I1124 18:02:53.799357 4702 generic.go:334] "Generic (PLEG): container finished" podID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerID="e356e2464f61bc0e31189c10db7fa47f76d8087e05c8aed0c756eefb03ba6b2b" exitCode=0 Nov 24 18:02:53 crc kubenswrapper[4702]: I1124 18:02:53.799396 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" event={"ID":"a4880862-6c76-47c4-9046-cf95cf711d7e","Type":"ContainerDied","Data":"e356e2464f61bc0e31189c10db7fa47f76d8087e05c8aed0c756eefb03ba6b2b"} Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.021722 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.203780 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle\") pod \"a4880862-6c76-47c4-9046-cf95cf711d7e\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.204158 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util\") pod \"a4880862-6c76-47c4-9046-cf95cf711d7e\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.204254 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4cb2\" (UniqueName: \"kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2\") pod \"a4880862-6c76-47c4-9046-cf95cf711d7e\" (UID: \"a4880862-6c76-47c4-9046-cf95cf711d7e\") " Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.204469 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle" (OuterVolumeSpecName: "bundle") pod "a4880862-6c76-47c4-9046-cf95cf711d7e" (UID: "a4880862-6c76-47c4-9046-cf95cf711d7e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.205634 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.210503 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2" (OuterVolumeSpecName: "kube-api-access-d4cb2") pod "a4880862-6c76-47c4-9046-cf95cf711d7e" (UID: "a4880862-6c76-47c4-9046-cf95cf711d7e"). InnerVolumeSpecName "kube-api-access-d4cb2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.218105 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util" (OuterVolumeSpecName: "util") pod "a4880862-6c76-47c4-9046-cf95cf711d7e" (UID: "a4880862-6c76-47c4-9046-cf95cf711d7e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.306981 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a4880862-6c76-47c4-9046-cf95cf711d7e-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.307011 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4cb2\" (UniqueName: \"kubernetes.io/projected/a4880862-6c76-47c4-9046-cf95cf711d7e-kube-api-access-d4cb2\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.812577 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" event={"ID":"a4880862-6c76-47c4-9046-cf95cf711d7e","Type":"ContainerDied","Data":"00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6"} Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.812616 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00106d38dadeb1c56e268d17b4dc5a4e170bd34fd93397a1716a6914a64c6ae6" Nov 24 18:02:55 crc kubenswrapper[4702]: I1124 18:02:55.812643 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.094177 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl"] Nov 24 18:03:07 crc kubenswrapper[4702]: E1124 18:03:07.095321 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="pull" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.095338 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="pull" Nov 24 18:03:07 crc kubenswrapper[4702]: E1124 18:03:07.095348 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="extract" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.095354 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="extract" Nov 24 18:03:07 crc kubenswrapper[4702]: E1124 18:03:07.095378 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="util" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.095399 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="util" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.095562 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4880862-6c76-47c4-9046-cf95cf711d7e" containerName="extract" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.096244 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.101052 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.101838 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-z7bcg" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.117759 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl"] Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.155393 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g64hg\" (UniqueName: \"kubernetes.io/projected/0857a270-3c6c-4e5d-b7f9-4589b12beec7-kube-api-access-g64hg\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.155578 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-apiservice-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.155604 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-webhook-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.256942 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-webhook-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.257018 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-apiservice-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.257052 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g64hg\" (UniqueName: \"kubernetes.io/projected/0857a270-3c6c-4e5d-b7f9-4589b12beec7-kube-api-access-g64hg\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.264785 4702 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-apiservice-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.270435 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0857a270-3c6c-4e5d-b7f9-4589b12beec7-webhook-cert\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.275563 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g64hg\" (UniqueName: \"kubernetes.io/projected/0857a270-3c6c-4e5d-b7f9-4589b12beec7-kube-api-access-g64hg\") pod \"keystone-operator-controller-manager-7c8667cbc8-rdxfl\" (UID: \"0857a270-3c6c-4e5d-b7f9-4589b12beec7\") " pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.415084 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.810458 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl"] Nov 24 18:03:07 crc kubenswrapper[4702]: I1124 18:03:07.883824 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" event={"ID":"0857a270-3c6c-4e5d-b7f9-4589b12beec7","Type":"ContainerStarted","Data":"1d9f267f7cb7f1872853fbb341e38d3afbb038d86e4c66f705dad851eb8ea1f1"} Nov 24 18:03:08 crc kubenswrapper[4702]: I1124 18:03:08.889308 4702 generic.go:334] "Generic (PLEG): container finished" podID="ecd2bb28-4395-494d-944a-7f25b22e1561" containerID="53b91aea5de7793217bbca979aaf8884cce2658fe90704ccd0febdc2d0a2f49e" exitCode=0 Nov 24 18:03:08 crc kubenswrapper[4702]: I1124 18:03:08.889389 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"ecd2bb28-4395-494d-944a-7f25b22e1561","Type":"ContainerDied","Data":"53b91aea5de7793217bbca979aaf8884cce2658fe90704ccd0febdc2d0a2f49e"} Nov 24 18:03:09 crc kubenswrapper[4702]: I1124 18:03:09.898839 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"ecd2bb28-4395-494d-944a-7f25b22e1561","Type":"ContainerStarted","Data":"e9cb7a69b9c82da1e0edb9d0e3c11c613018f3c8a7431f29a22d637c7a977b21"} Nov 24 18:03:09 crc kubenswrapper[4702]: I1124 18:03:09.899720 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:03:09 crc kubenswrapper[4702]: I1124 18:03:09.920439 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/rabbitmq-server-0" podStartSLOduration=37.043126917 podStartE2EDuration="43.920419194s" podCreationTimestamp="2025-11-24 18:02:26 +0000 UTC" firstStartedPulling="2025-11-24 18:02:28.307662137 +0000 UTC m=+837.548403301" lastFinishedPulling="2025-11-24 18:02:35.184954414 +0000 UTC m=+844.425695578" 
observedRunningTime="2025-11-24 18:03:09.917391532 +0000 UTC m=+879.158132726" watchObservedRunningTime="2025-11-24 18:03:09.920419194 +0000 UTC m=+879.161160358" Nov 24 18:03:12 crc kubenswrapper[4702]: I1124 18:03:12.920046 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" event={"ID":"0857a270-3c6c-4e5d-b7f9-4589b12beec7","Type":"ContainerStarted","Data":"44bf2e4f0c5e1770580897690cc444f41602ed85b25cfeb664b24a4b052eb812"} Nov 24 18:03:12 crc kubenswrapper[4702]: I1124 18:03:12.920389 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:12 crc kubenswrapper[4702]: I1124 18:03:12.938262 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" podStartSLOduration=1.696924488 podStartE2EDuration="5.93824686s" podCreationTimestamp="2025-11-24 18:03:07 +0000 UTC" firstStartedPulling="2025-11-24 18:03:07.82156991 +0000 UTC m=+877.062311074" lastFinishedPulling="2025-11-24 18:03:12.062892282 +0000 UTC m=+881.303633446" observedRunningTime="2025-11-24 18:03:12.93603722 +0000 UTC m=+882.176778404" watchObservedRunningTime="2025-11-24 18:03:12.93824686 +0000 UTC m=+882.178988024" Nov 24 18:03:17 crc kubenswrapper[4702]: I1124 18:03:17.419603 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7c8667cbc8-rdxfl" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.401892 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-0eaa-account-create-update-594nr"] Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.403076 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.405113 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-db-secret" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.414495 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-0eaa-account-create-update-594nr"] Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.498789 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-db-create-lh7m6"] Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.499611 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.504674 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-create-lh7m6"] Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.553427 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xqrc\" (UniqueName: \"kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc\") pod \"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.553501 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts\") pod \"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.657365 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.657493 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t62m2\" (UniqueName: \"kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.657628 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xqrc\" (UniqueName: \"kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc\") pod \"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.657661 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts\") pod \"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.658557 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts\") pod \"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.682522 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xqrc\" (UniqueName: \"kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc\") pod 
\"keystone-0eaa-account-create-update-594nr\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.724760 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.758950 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.759001 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t62m2\" (UniqueName: \"kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.760165 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.778073 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t62m2\" (UniqueName: \"kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2\") pod \"keystone-db-create-lh7m6\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:21 crc kubenswrapper[4702]: I1124 18:03:21.815356 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.153328 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-0eaa-account-create-update-594nr"] Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.222036 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-create-lh7m6"] Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.483118 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.483449 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.982316 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" event={"ID":"73321ea7-21d6-42e7-be3f-311cbcc3dfed","Type":"ContainerStarted","Data":"95fb5c70839312f17f0eb20ef6ff9f821f4fc38946c3609242c734e807e32584"} Nov 24 18:03:22 crc kubenswrapper[4702]: I1124 18:03:22.984197 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-lh7m6" event={"ID":"f0d6d4fb-8a29-4170-bba6-5416855a535a","Type":"ContainerStarted","Data":"eacebbc3c3e6a07d3987151eb2be63181adde1a4a1f5f32d6ebf67ed819870cd"} Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.250397 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-index-sxlqv"] Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.251813 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.253985 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-index-dockercfg-rnztg" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.261778 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-sxlqv"] Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.392252 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc424\" (UniqueName: \"kubernetes.io/projected/81f6ab0d-36cf-46bd-8d94-cb790654a1b0-kube-api-access-tc424\") pod \"horizon-operator-index-sxlqv\" (UID: \"81f6ab0d-36cf-46bd-8d94-cb790654a1b0\") " pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.493609 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc424\" (UniqueName: \"kubernetes.io/projected/81f6ab0d-36cf-46bd-8d94-cb790654a1b0-kube-api-access-tc424\") pod \"horizon-operator-index-sxlqv\" (UID: \"81f6ab0d-36cf-46bd-8d94-cb790654a1b0\") " pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.519103 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc424\" (UniqueName: \"kubernetes.io/projected/81f6ab0d-36cf-46bd-8d94-cb790654a1b0-kube-api-access-tc424\") pod \"horizon-operator-index-sxlqv\" (UID: \"81f6ab0d-36cf-46bd-8d94-cb790654a1b0\") " pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.570141 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:24 crc kubenswrapper[4702]: I1124 18:03:24.986368 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-sxlqv"] Nov 24 18:03:25 crc kubenswrapper[4702]: W1124 18:03:24.996781 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81f6ab0d_36cf_46bd_8d94_cb790654a1b0.slice/crio-ee348f283a7c24d983c878746a543ff57b21d6a3e35ef5c79a9881f0afd1eb88 WatchSource:0}: Error finding container ee348f283a7c24d983c878746a543ff57b21d6a3e35ef5c79a9881f0afd1eb88: Status 404 returned error can't find the container with id ee348f283a7c24d983c878746a543ff57b21d6a3e35ef5c79a9881f0afd1eb88 Nov 24 18:03:26 crc kubenswrapper[4702]: I1124 18:03:26.007209 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-sxlqv" event={"ID":"81f6ab0d-36cf-46bd-8d94-cb790654a1b0","Type":"ContainerStarted","Data":"ee348f283a7c24d983c878746a543ff57b21d6a3e35ef5c79a9881f0afd1eb88"} Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.445821 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.446899 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.449664 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-index-dockercfg-sqlv5" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.460501 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.535630 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbhkv\" (UniqueName: \"kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv\") pod \"swift-operator-index-mxl7g\" (UID: \"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c\") " pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.637565 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbhkv\" (UniqueName: \"kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv\") pod \"swift-operator-index-mxl7g\" (UID: \"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c\") " pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.658570 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbhkv\" (UniqueName: \"kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv\") pod \"swift-operator-index-mxl7g\" (UID: \"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c\") " pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.769150 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:27.899004 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/rabbitmq-server-0" Nov 24 18:03:30 crc kubenswrapper[4702]: I1124 18:03:30.664325 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:30 crc kubenswrapper[4702]: W1124 18:03:30.673009 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bddbf7a_3653_4d07_b5cc_ac1860f8a20c.slice/crio-38c9d1e9333a7bb0a90d2a82479905876bceba1dd255982a25674fddbf1a0cf3 WatchSource:0}: Error finding container 38c9d1e9333a7bb0a90d2a82479905876bceba1dd255982a25674fddbf1a0cf3: Status 404 returned error can't find the container with id 38c9d1e9333a7bb0a90d2a82479905876bceba1dd255982a25674fddbf1a0cf3 Nov 24 18:03:31 crc kubenswrapper[4702]: I1124 18:03:31.041344 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" event={"ID":"73321ea7-21d6-42e7-be3f-311cbcc3dfed","Type":"ContainerStarted","Data":"771fb315b724b66aea83d0565100ca3af55c5bb493f27eebd99db50072d5ae77"} Nov 24 18:03:31 crc kubenswrapper[4702]: I1124 18:03:31.042516 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-mxl7g" event={"ID":"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c","Type":"ContainerStarted","Data":"38c9d1e9333a7bb0a90d2a82479905876bceba1dd255982a25674fddbf1a0cf3"} Nov 24 18:03:31 crc kubenswrapper[4702]: I1124 18:03:31.043887 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-lh7m6" event={"ID":"f0d6d4fb-8a29-4170-bba6-5416855a535a","Type":"ContainerStarted","Data":"9287b841b67760c829ae21cf4a51e1b835940f4a03cee658252334da218492ae"} Nov 24 18:03:32 crc kubenswrapper[4702]: I1124 18:03:32.068963 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-db-create-lh7m6" podStartSLOduration=11.068940305 podStartE2EDuration="11.068940305s" podCreationTimestamp="2025-11-24 18:03:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:03:32.064342832 +0000 UTC m=+901.305083996" watchObservedRunningTime="2025-11-24 18:03:32.068940305 +0000 UTC m=+901.309681479" Nov 24 18:03:32 crc kubenswrapper[4702]: I1124 18:03:32.079676 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" podStartSLOduration=11.079651393 podStartE2EDuration="11.079651393s" podCreationTimestamp="2025-11-24 18:03:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:03:32.079162801 +0000 UTC m=+901.319903965" watchObservedRunningTime="2025-11-24 18:03:32.079651393 +0000 UTC m=+901.320392567" Nov 24 18:03:32 crc kubenswrapper[4702]: I1124 18:03:32.839391 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.059148 4702 generic.go:334] "Generic (PLEG): container finished" podID="f0d6d4fb-8a29-4170-bba6-5416855a535a" containerID="9287b841b67760c829ae21cf4a51e1b835940f4a03cee658252334da218492ae" exitCode=0 Nov 24 
18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.059195 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-lh7m6" event={"ID":"f0d6d4fb-8a29-4170-bba6-5416855a535a","Type":"ContainerDied","Data":"9287b841b67760c829ae21cf4a51e1b835940f4a03cee658252334da218492ae"} Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.061081 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-sxlqv" event={"ID":"81f6ab0d-36cf-46bd-8d94-cb790654a1b0","Type":"ContainerStarted","Data":"d59ba20fb820548636044ecf928a754db714ca469c095676cdba42035869d4e8"} Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.063272 4702 generic.go:334] "Generic (PLEG): container finished" podID="73321ea7-21d6-42e7-be3f-311cbcc3dfed" containerID="771fb315b724b66aea83d0565100ca3af55c5bb493f27eebd99db50072d5ae77" exitCode=0 Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.063312 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" event={"ID":"73321ea7-21d6-42e7-be3f-311cbcc3dfed","Type":"ContainerDied","Data":"771fb315b724b66aea83d0565100ca3af55c5bb493f27eebd99db50072d5ae77"} Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.103274 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-index-sxlqv" podStartSLOduration=1.3835691940000001 podStartE2EDuration="9.10325254s" podCreationTimestamp="2025-11-24 18:03:24 +0000 UTC" firstStartedPulling="2025-11-24 18:03:24.99868316 +0000 UTC m=+894.239424324" lastFinishedPulling="2025-11-24 18:03:32.718366506 +0000 UTC m=+901.959107670" observedRunningTime="2025-11-24 18:03:33.086936232 +0000 UTC m=+902.327677396" watchObservedRunningTime="2025-11-24 18:03:33.10325254 +0000 UTC m=+902.343993704" Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.665772 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-index-w2sb2"] Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.666776 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.671591 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-w2sb2"] Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.822386 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrht7\" (UniqueName: \"kubernetes.io/projected/00cf497b-b7bb-4450-b5fd-fad217daba30-kube-api-access-lrht7\") pod \"swift-operator-index-w2sb2\" (UID: \"00cf497b-b7bb-4450-b5fd-fad217daba30\") " pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.923777 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrht7\" (UniqueName: \"kubernetes.io/projected/00cf497b-b7bb-4450-b5fd-fad217daba30-kube-api-access-lrht7\") pod \"swift-operator-index-w2sb2\" (UID: \"00cf497b-b7bb-4450-b5fd-fad217daba30\") " pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.945136 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrht7\" (UniqueName: \"kubernetes.io/projected/00cf497b-b7bb-4450-b5fd-fad217daba30-kube-api-access-lrht7\") pod \"swift-operator-index-w2sb2\" (UID: \"00cf497b-b7bb-4450-b5fd-fad217daba30\") " pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:33 crc kubenswrapper[4702]: I1124 18:03:33.981095 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.433034 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.438008 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:34 crc kubenswrapper[4702]: W1124 18:03:34.461851 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00cf497b_b7bb_4450_b5fd_fad217daba30.slice/crio-76e8bbf40ac7ec02a264531f0f2684fcdc2a092d0a37c7fbcf2996d7ba13c59b WatchSource:0}: Error finding container 76e8bbf40ac7ec02a264531f0f2684fcdc2a092d0a37c7fbcf2996d7ba13c59b: Status 404 returned error can't find the container with id 76e8bbf40ac7ec02a264531f0f2684fcdc2a092d0a37c7fbcf2996d7ba13c59b Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.462863 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-w2sb2"] Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.532215 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xqrc\" (UniqueName: \"kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc\") pod \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.532257 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t62m2\" (UniqueName: \"kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2\") pod \"f0d6d4fb-8a29-4170-bba6-5416855a535a\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.532280 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts\") pod \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\" (UID: \"73321ea7-21d6-42e7-be3f-311cbcc3dfed\") " Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.532313 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts\") pod \"f0d6d4fb-8a29-4170-bba6-5416855a535a\" (UID: \"f0d6d4fb-8a29-4170-bba6-5416855a535a\") " Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.532933 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "73321ea7-21d6-42e7-be3f-311cbcc3dfed" (UID: "73321ea7-21d6-42e7-be3f-311cbcc3dfed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.533042 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f0d6d4fb-8a29-4170-bba6-5416855a535a" (UID: "f0d6d4fb-8a29-4170-bba6-5416855a535a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.533370 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/73321ea7-21d6-42e7-be3f-311cbcc3dfed-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.533386 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0d6d4fb-8a29-4170-bba6-5416855a535a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.538473 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc" (OuterVolumeSpecName: "kube-api-access-5xqrc") pod "73321ea7-21d6-42e7-be3f-311cbcc3dfed" (UID: "73321ea7-21d6-42e7-be3f-311cbcc3dfed"). InnerVolumeSpecName "kube-api-access-5xqrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.538555 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2" (OuterVolumeSpecName: "kube-api-access-t62m2") pod "f0d6d4fb-8a29-4170-bba6-5416855a535a" (UID: "f0d6d4fb-8a29-4170-bba6-5416855a535a"). InnerVolumeSpecName "kube-api-access-t62m2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.570772 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.570914 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.598216 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.634452 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xqrc\" (UniqueName: \"kubernetes.io/projected/73321ea7-21d6-42e7-be3f-311cbcc3dfed-kube-api-access-5xqrc\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:34 crc kubenswrapper[4702]: I1124 18:03:34.634493 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t62m2\" (UniqueName: \"kubernetes.io/projected/f0d6d4fb-8a29-4170-bba6-5416855a535a-kube-api-access-t62m2\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.082003 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-mxl7g" event={"ID":"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c","Type":"ContainerStarted","Data":"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0"} Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.083052 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/swift-operator-index-mxl7g" podUID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" containerName="registry-server" containerID="cri-o://9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0" gracePeriod=2 Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.084758 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-lh7m6" 
event={"ID":"f0d6d4fb-8a29-4170-bba6-5416855a535a","Type":"ContainerDied","Data":"eacebbc3c3e6a07d3987151eb2be63181adde1a4a1f5f32d6ebf67ed819870cd"} Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.084826 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eacebbc3c3e6a07d3987151eb2be63181adde1a4a1f5f32d6ebf67ed819870cd" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.084788 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-lh7m6" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.087551 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.087608 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-0eaa-account-create-update-594nr" event={"ID":"73321ea7-21d6-42e7-be3f-311cbcc3dfed","Type":"ContainerDied","Data":"95fb5c70839312f17f0eb20ef6ff9f821f4fc38946c3609242c734e807e32584"} Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.087651 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95fb5c70839312f17f0eb20ef6ff9f821f4fc38946c3609242c734e807e32584" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.089115 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-w2sb2" event={"ID":"00cf497b-b7bb-4450-b5fd-fad217daba30","Type":"ContainerStarted","Data":"76e8bbf40ac7ec02a264531f0f2684fcdc2a092d0a37c7fbcf2996d7ba13c59b"} Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.109209 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-index-mxl7g" podStartSLOduration=4.816379461 podStartE2EDuration="8.109189065s" podCreationTimestamp="2025-11-24 18:03:27 +0000 UTC" firstStartedPulling="2025-11-24 18:03:30.67554925 +0000 UTC m=+899.916290414" lastFinishedPulling="2025-11-24 18:03:33.968358854 +0000 UTC m=+903.209100018" observedRunningTime="2025-11-24 18:03:35.104070168 +0000 UTC m=+904.344811352" watchObservedRunningTime="2025-11-24 18:03:35.109189065 +0000 UTC m=+904.349930239" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.563695 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.646677 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbhkv\" (UniqueName: \"kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv\") pod \"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c\" (UID: \"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c\") " Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.651968 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv" (OuterVolumeSpecName: "kube-api-access-sbhkv") pod "0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" (UID: "0bddbf7a-3653-4d07-b5cc-ac1860f8a20c"). InnerVolumeSpecName "kube-api-access-sbhkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:35 crc kubenswrapper[4702]: I1124 18:03:35.748215 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbhkv\" (UniqueName: \"kubernetes.io/projected/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c-kube-api-access-sbhkv\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.096734 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-w2sb2" event={"ID":"00cf497b-b7bb-4450-b5fd-fad217daba30","Type":"ContainerStarted","Data":"3506fb08d930466b8904bb2067dcd6ae0be84a250a48ae0fed7cead6d0d9acb7"} Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.098470 4702 generic.go:334] "Generic (PLEG): container finished" podID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" containerID="9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0" exitCode=0 Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.098520 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-mxl7g" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.098514 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-mxl7g" event={"ID":"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c","Type":"ContainerDied","Data":"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0"} Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.098665 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-mxl7g" event={"ID":"0bddbf7a-3653-4d07-b5cc-ac1860f8a20c","Type":"ContainerDied","Data":"38c9d1e9333a7bb0a90d2a82479905876bceba1dd255982a25674fddbf1a0cf3"} Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.098690 4702 scope.go:117] "RemoveContainer" containerID="9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.115598 4702 scope.go:117] "RemoveContainer" containerID="9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0" Nov 24 18:03:36 crc kubenswrapper[4702]: E1124 18:03:36.116118 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0\": container with ID starting with 9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0 not found: ID does not exist" containerID="9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.116162 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0"} err="failed to get container status \"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0\": rpc error: code = NotFound desc = could not find container \"9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0\": container with ID starting with 9d428db72e6ea81699eea59add513bd0f5d64ecc6dea0a26158965fac30e56c0 not found: ID does not exist" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.117683 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-index-w2sb2" podStartSLOduration=1.918529295 podStartE2EDuration="3.117660655s" podCreationTimestamp="2025-11-24 18:03:33 +0000 UTC" firstStartedPulling="2025-11-24 18:03:34.466951647 +0000 UTC 
m=+903.707692811" lastFinishedPulling="2025-11-24 18:03:35.666083017 +0000 UTC m=+904.906824171" observedRunningTime="2025-11-24 18:03:36.113871543 +0000 UTC m=+905.354612717" watchObservedRunningTime="2025-11-24 18:03:36.117660655 +0000 UTC m=+905.358401829" Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.127069 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:36 crc kubenswrapper[4702]: I1124 18:03:36.130157 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/swift-operator-index-mxl7g"] Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092010 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-db-sync-4k497"] Nov 24 18:03:37 crc kubenswrapper[4702]: E1124 18:03:37.092267 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" containerName="registry-server" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092278 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" containerName="registry-server" Nov 24 18:03:37 crc kubenswrapper[4702]: E1124 18:03:37.092293 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73321ea7-21d6-42e7-be3f-311cbcc3dfed" containerName="mariadb-account-create-update" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092300 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="73321ea7-21d6-42e7-be3f-311cbcc3dfed" containerName="mariadb-account-create-update" Nov 24 18:03:37 crc kubenswrapper[4702]: E1124 18:03:37.092311 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0d6d4fb-8a29-4170-bba6-5416855a535a" containerName="mariadb-database-create" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092317 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0d6d4fb-8a29-4170-bba6-5416855a535a" containerName="mariadb-database-create" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092419 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" containerName="registry-server" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092429 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0d6d4fb-8a29-4170-bba6-5416855a535a" containerName="mariadb-database-create" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092440 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="73321ea7-21d6-42e7-be3f-311cbcc3dfed" containerName="mariadb-account-create-update" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.092917 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.095570 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pgssh" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.095651 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.097341 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.097371 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.104626 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-4k497"] Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.165207 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.165341 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tzvb\" (UniqueName: \"kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.266542 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tzvb\" (UniqueName: \"kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.266625 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.272440 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.283712 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tzvb\" (UniqueName: \"kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb\") pod \"keystone-db-sync-4k497\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") " pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.408187 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-4k497" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.654558 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bddbf7a-3653-4d07-b5cc-ac1860f8a20c" path="/var/lib/kubelet/pods/0bddbf7a-3653-4d07-b5cc-ac1860f8a20c/volumes" Nov 24 18:03:37 crc kubenswrapper[4702]: I1124 18:03:37.803515 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-4k497"] Nov 24 18:03:37 crc kubenswrapper[4702]: W1124 18:03:37.810023 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3646f1dd_f4f1_4bdd_9f48_a2ac45542a14.slice/crio-536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8 WatchSource:0}: Error finding container 536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8: Status 404 returned error can't find the container with id 536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8 Nov 24 18:03:38 crc kubenswrapper[4702]: I1124 18:03:38.114674 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-4k497" event={"ID":"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14","Type":"ContainerStarted","Data":"536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8"} Nov 24 18:03:43 crc kubenswrapper[4702]: I1124 18:03:43.981999 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:43 crc kubenswrapper[4702]: I1124 18:03:43.982901 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:44 crc kubenswrapper[4702]: I1124 18:03:44.010207 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:44 crc kubenswrapper[4702]: I1124 18:03:44.172727 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-index-w2sb2" Nov 24 18:03:44 crc kubenswrapper[4702]: I1124 18:03:44.612818 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-index-sxlqv" Nov 24 18:03:45 crc kubenswrapper[4702]: I1124 18:03:45.158661 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-4k497" event={"ID":"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14","Type":"ContainerStarted","Data":"8888fdd1c725ff404aef9f5ff9f5c95a473980dadd67a82993d9db626f2e94b0"} Nov 24 18:03:45 crc kubenswrapper[4702]: I1124 18:03:45.175610 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-db-sync-4k497" podStartSLOduration=1.5155373349999999 podStartE2EDuration="8.175583665s" podCreationTimestamp="2025-11-24 18:03:37 +0000 UTC" firstStartedPulling="2025-11-24 18:03:37.813263681 +0000 UTC m=+907.054004845" lastFinishedPulling="2025-11-24 18:03:44.473310011 +0000 UTC m=+913.714051175" observedRunningTime="2025-11-24 18:03:45.171924526 +0000 UTC m=+914.412665740" watchObservedRunningTime="2025-11-24 18:03:45.175583665 +0000 UTC m=+914.416324869" Nov 24 18:03:48 crc kubenswrapper[4702]: I1124 18:03:48.177430 4702 generic.go:334] "Generic (PLEG): container finished" podID="3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" containerID="8888fdd1c725ff404aef9f5ff9f5c95a473980dadd67a82993d9db626f2e94b0" exitCode=0 Nov 24 18:03:48 crc kubenswrapper[4702]: I1124 
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.441144 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-4k497"
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.534424 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data\") pod \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") "
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.534465 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tzvb\" (UniqueName: \"kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb\") pod \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\" (UID: \"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14\") "
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.540016 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb" (OuterVolumeSpecName: "kube-api-access-4tzvb") pod "3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" (UID: "3646f1dd-f4f1-4bdd-9f48-a2ac45542a14"). InnerVolumeSpecName "kube-api-access-4tzvb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.564603 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data" (OuterVolumeSpecName: "config-data") pod "3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" (UID: "3646f1dd-f4f1-4bdd-9f48-a2ac45542a14"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.636420 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 18:03:49 crc kubenswrapper[4702]: I1124 18:03:49.636448 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tzvb\" (UniqueName: \"kubernetes.io/projected/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14-kube-api-access-4tzvb\") on node \"crc\" DevicePath \"\""
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.190877 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-4k497" event={"ID":"3646f1dd-f4f1-4bdd-9f48-a2ac45542a14","Type":"ContainerDied","Data":"536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8"}
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.190919 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="536b5776b7f19c6606a414c1853f02275b6c58a88dd471c06960e74e3bb726f8"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.190924 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-4k497"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.372692 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-r5jmd"]
Nov 24 18:03:50 crc kubenswrapper[4702]: E1124 18:03:50.373196 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" containerName="keystone-db-sync"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.373219 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" containerName="keystone-db-sync"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.373387 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" containerName="keystone-db-sync"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.374104 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.378498 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-r5jmd"]
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.378777 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.379189 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.379371 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pgssh"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.379540 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"osp-secret"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.386051 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.446889 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.446939 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.446969 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd"
Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.447007 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxdj6\" (UniqueName: \"kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd"
\"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.447231 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.549154 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.549205 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.549227 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.549267 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxdj6\" (UniqueName: \"kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.549293 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.553436 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.553502 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.553512 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " 
pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.553744 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.568911 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxdj6\" (UniqueName: \"kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6\") pod \"keystone-bootstrap-r5jmd\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:50 crc kubenswrapper[4702]: I1124 18:03:50.696944 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:51 crc kubenswrapper[4702]: I1124 18:03:51.079498 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-r5jmd"] Nov 24 18:03:51 crc kubenswrapper[4702]: W1124 18:03:51.085081 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7c02c76_0859_42bc_b1ac_4aeddd431161.slice/crio-c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be WatchSource:0}: Error finding container c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be: Status 404 returned error can't find the container with id c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be Nov 24 18:03:51 crc kubenswrapper[4702]: I1124 18:03:51.197408 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" event={"ID":"e7c02c76-0859-42bc-b1ac-4aeddd431161","Type":"ContainerStarted","Data":"c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be"} Nov 24 18:03:52 crc kubenswrapper[4702]: I1124 18:03:52.204046 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" event={"ID":"e7c02c76-0859-42bc-b1ac-4aeddd431161","Type":"ContainerStarted","Data":"1453fdb48fabc746672d5daeeb8035c48c2b42c07cb0aa341e0ddb80679d42f9"} Nov 24 18:03:52 crc kubenswrapper[4702]: I1124 18:03:52.221151 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" podStartSLOduration=2.221123676 podStartE2EDuration="2.221123676s" podCreationTimestamp="2025-11-24 18:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:03:52.220070667 +0000 UTC m=+921.460811851" watchObservedRunningTime="2025-11-24 18:03:52.221123676 +0000 UTC m=+921.461864840" Nov 24 18:03:52 crc kubenswrapper[4702]: I1124 18:03:52.482992 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:03:52 crc kubenswrapper[4702]: I1124 18:03:52.483043 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:03:54 crc kubenswrapper[4702]: I1124 18:03:54.216335 4702 generic.go:334] "Generic (PLEG): container finished" podID="e7c02c76-0859-42bc-b1ac-4aeddd431161" containerID="1453fdb48fabc746672d5daeeb8035c48c2b42c07cb0aa341e0ddb80679d42f9" exitCode=0 Nov 24 18:03:54 crc kubenswrapper[4702]: I1124 18:03:54.216412 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" event={"ID":"e7c02c76-0859-42bc-b1ac-4aeddd431161","Type":"ContainerDied","Data":"1453fdb48fabc746672d5daeeb8035c48c2b42c07cb0aa341e0ddb80679d42f9"} Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.491367 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.619841 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts\") pod \"e7c02c76-0859-42bc-b1ac-4aeddd431161\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.619873 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data\") pod \"e7c02c76-0859-42bc-b1ac-4aeddd431161\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.619918 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys\") pod \"e7c02c76-0859-42bc-b1ac-4aeddd431161\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.619956 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys\") pod \"e7c02c76-0859-42bc-b1ac-4aeddd431161\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.620051 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxdj6\" (UniqueName: \"kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6\") pod \"e7c02c76-0859-42bc-b1ac-4aeddd431161\" (UID: \"e7c02c76-0859-42bc-b1ac-4aeddd431161\") " Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.624944 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts" (OuterVolumeSpecName: "scripts") pod "e7c02c76-0859-42bc-b1ac-4aeddd431161" (UID: "e7c02c76-0859-42bc-b1ac-4aeddd431161"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.625211 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e7c02c76-0859-42bc-b1ac-4aeddd431161" (UID: "e7c02c76-0859-42bc-b1ac-4aeddd431161"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.625286 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6" (OuterVolumeSpecName: "kube-api-access-vxdj6") pod "e7c02c76-0859-42bc-b1ac-4aeddd431161" (UID: "e7c02c76-0859-42bc-b1ac-4aeddd431161"). InnerVolumeSpecName "kube-api-access-vxdj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.625523 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e7c02c76-0859-42bc-b1ac-4aeddd431161" (UID: "e7c02c76-0859-42bc-b1ac-4aeddd431161"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.637973 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data" (OuterVolumeSpecName: "config-data") pod "e7c02c76-0859-42bc-b1ac-4aeddd431161" (UID: "e7c02c76-0859-42bc-b1ac-4aeddd431161"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.722111 4702 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.722163 4702 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.722173 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxdj6\" (UniqueName: \"kubernetes.io/projected/e7c02c76-0859-42bc-b1ac-4aeddd431161-kube-api-access-vxdj6\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.722183 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:55 crc kubenswrapper[4702]: I1124 18:03:55.722192 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7c02c76-0859-42bc-b1ac-4aeddd431161-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.228238 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" event={"ID":"e7c02c76-0859-42bc-b1ac-4aeddd431161","Type":"ContainerDied","Data":"c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be"} Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.228288 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c397147247b55c59bdd7ac61a645ffc2c126b9b09823f0f36243618d52eb67be" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.228325 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-r5jmd" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.401278 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-5d44bcfcdf-szfx6"] Nov 24 18:03:56 crc kubenswrapper[4702]: E1124 18:03:56.401520 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7c02c76-0859-42bc-b1ac-4aeddd431161" containerName="keystone-bootstrap" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.401535 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7c02c76-0859-42bc-b1ac-4aeddd431161" containerName="keystone-bootstrap" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.401697 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7c02c76-0859-42bc-b1ac-4aeddd431161" containerName="keystone-bootstrap" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.402138 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.404213 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.404282 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pgssh" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.404771 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.404994 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.417078 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-5d44bcfcdf-szfx6"] Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.534326 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4qld\" (UniqueName: \"kubernetes.io/projected/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-kube-api-access-z4qld\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.534415 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-config-data\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.534501 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-credential-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.534553 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-fernet-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 
18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.534593 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-scripts\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.636466 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-config-data\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.636554 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-credential-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.636592 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-fernet-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.636616 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-scripts\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.636658 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4qld\" (UniqueName: \"kubernetes.io/projected/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-kube-api-access-z4qld\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.641755 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-scripts\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.641759 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-credential-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.641953 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-config-data\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.642026 4702 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-fernet-keys\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.652968 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4qld\" (UniqueName: \"kubernetes.io/projected/8cb198f9-f3d0-4d3b-b99e-95427f1bff17-kube-api-access-z4qld\") pod \"keystone-5d44bcfcdf-szfx6\" (UID: \"8cb198f9-f3d0-4d3b-b99e-95427f1bff17\") " pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:56 crc kubenswrapper[4702]: I1124 18:03:56.716646 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:57 crc kubenswrapper[4702]: I1124 18:03:57.108545 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-5d44bcfcdf-szfx6"] Nov 24 18:03:57 crc kubenswrapper[4702]: I1124 18:03:57.234499 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" event={"ID":"8cb198f9-f3d0-4d3b-b99e-95427f1bff17","Type":"ContainerStarted","Data":"ed40dabe7d107c1f7edb0bba1c1145e8585c664462ec273da552ef689197e611"} Nov 24 18:03:58 crc kubenswrapper[4702]: I1124 18:03:58.243064 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" event={"ID":"8cb198f9-f3d0-4d3b-b99e-95427f1bff17","Type":"ContainerStarted","Data":"ec4852e5ebe45ee41bbb141db7c626a843c4a7402b93c77c5e7efb49561eba95"} Nov 24 18:03:58 crc kubenswrapper[4702]: I1124 18:03:58.243653 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:03:58 crc kubenswrapper[4702]: I1124 18:03:58.262416 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" podStartSLOduration=2.262395519 podStartE2EDuration="2.262395519s" podCreationTimestamp="2025-11-24 18:03:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:03:58.258465674 +0000 UTC m=+927.499206848" watchObservedRunningTime="2025-11-24 18:03:58.262395519 +0000 UTC m=+927.503136693" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.315117 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd"] Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.318623 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.322435 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.327129 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd"] Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.476425 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2v9z\" (UniqueName: \"kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.476792 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.476926 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.578376 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.578439 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.578495 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2v9z\" (UniqueName: \"kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.578944 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.579097 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.602038 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2v9z\" (UniqueName: \"kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z\") pod \"62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:06 crc kubenswrapper[4702]: I1124 18:04:06.645970 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.050319 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd"] Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.105972 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd"] Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.107354 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.117533 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd"] Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.187002 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.187311 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5pqf\" (UniqueName: \"kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.187479 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.288787 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.289430 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.289598 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5pqf\" (UniqueName: \"kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.289443 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " 
pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.289743 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.299488 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" event={"ID":"9905111d-899a-490f-813a-027435cc85bf","Type":"ContainerStarted","Data":"e1419690ea5a3b7c6e66ecbbf89a85f2c2aa6fa1319b540c89ac262522e626c5"} Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.311113 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5pqf\" (UniqueName: \"kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf\") pod \"440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.524255 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:07 crc kubenswrapper[4702]: I1124 18:04:07.902361 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd"] Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.311347 4702 generic.go:334] "Generic (PLEG): container finished" podID="9905111d-899a-490f-813a-027435cc85bf" containerID="32a7daa78cf06adff24221da27f67e6c6707c8067b199f25bc5cab53b94916e2" exitCode=0 Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.311406 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" event={"ID":"9905111d-899a-490f-813a-027435cc85bf","Type":"ContainerDied","Data":"32a7daa78cf06adff24221da27f67e6c6707c8067b199f25bc5cab53b94916e2"} Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.313080 4702 generic.go:334] "Generic (PLEG): container finished" podID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerID="4e7ebb3456d57ca8fe4d5f66705fd0e1bc286a928d5b82477a794d274eb85427" exitCode=0 Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.313157 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" event={"ID":"85f0cdb4-6612-4d20-ab62-f7a82947bf1f","Type":"ContainerDied","Data":"4e7ebb3456d57ca8fe4d5f66705fd0e1bc286a928d5b82477a794d274eb85427"} Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.313175 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" event={"ID":"85f0cdb4-6612-4d20-ab62-f7a82947bf1f","Type":"ContainerStarted","Data":"94c8cfb7d71b1a8125ddd0bc7f18305725e4d50ea66b811f96eb05e795251f96"} Nov 24 18:04:08 crc kubenswrapper[4702]: I1124 18:04:08.314311 4702 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Nov 24 18:04:09 crc kubenswrapper[4702]: I1124 18:04:09.320966 4702 generic.go:334] "Generic (PLEG): container finished" podID="9905111d-899a-490f-813a-027435cc85bf" containerID="5235e465edda1bfbce7d4c074cd1fee1ac0120f928c2963259dfae2d55e6b638" exitCode=0 Nov 24 18:04:09 crc kubenswrapper[4702]: I1124 18:04:09.321011 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" event={"ID":"9905111d-899a-490f-813a-027435cc85bf","Type":"ContainerDied","Data":"5235e465edda1bfbce7d4c074cd1fee1ac0120f928c2963259dfae2d55e6b638"} Nov 24 18:04:09 crc kubenswrapper[4702]: I1124 18:04:09.324555 4702 generic.go:334] "Generic (PLEG): container finished" podID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerID="5d3e03bfdc55082f3845c8c46575f4776c73e0116195bed53dc03ec0ab068f2b" exitCode=0 Nov 24 18:04:09 crc kubenswrapper[4702]: I1124 18:04:09.324619 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" event={"ID":"85f0cdb4-6612-4d20-ab62-f7a82947bf1f","Type":"ContainerDied","Data":"5d3e03bfdc55082f3845c8c46575f4776c73e0116195bed53dc03ec0ab068f2b"} Nov 24 18:04:10 crc kubenswrapper[4702]: I1124 18:04:10.334580 4702 generic.go:334] "Generic (PLEG): container finished" podID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerID="6575ac06d7622024cf5720128e84569575f4b6ebee6b5b195f2cab66c922d14e" exitCode=0 Nov 24 18:04:10 crc kubenswrapper[4702]: I1124 18:04:10.334676 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" event={"ID":"85f0cdb4-6612-4d20-ab62-f7a82947bf1f","Type":"ContainerDied","Data":"6575ac06d7622024cf5720128e84569575f4b6ebee6b5b195f2cab66c922d14e"} Nov 24 18:04:10 crc kubenswrapper[4702]: I1124 18:04:10.337640 4702 generic.go:334] "Generic (PLEG): container finished" podID="9905111d-899a-490f-813a-027435cc85bf" containerID="464513dae9b44ab65691038aa57a0933e881d7f1ccfecf8b0f92900b0977daa0" exitCode=0 Nov 24 18:04:10 crc kubenswrapper[4702]: I1124 18:04:10.337682 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" event={"ID":"9905111d-899a-490f-813a-027435cc85bf","Type":"ContainerDied","Data":"464513dae9b44ab65691038aa57a0933e881d7f1ccfecf8b0f92900b0977daa0"} Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.645791 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.651070 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.747924 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5pqf\" (UniqueName: \"kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf\") pod \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.747973 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util\") pod \"9905111d-899a-490f-813a-027435cc85bf\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.748001 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle\") pod \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.748068 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle\") pod \"9905111d-899a-490f-813a-027435cc85bf\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.748085 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util\") pod \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\" (UID: \"85f0cdb4-6612-4d20-ab62-f7a82947bf1f\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.748887 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle" (OuterVolumeSpecName: "bundle") pod "9905111d-899a-490f-813a-027435cc85bf" (UID: "9905111d-899a-490f-813a-027435cc85bf"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.749070 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2v9z\" (UniqueName: \"kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z\") pod \"9905111d-899a-490f-813a-027435cc85bf\" (UID: \"9905111d-899a-490f-813a-027435cc85bf\") " Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.749106 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle" (OuterVolumeSpecName: "bundle") pod "85f0cdb4-6612-4d20-ab62-f7a82947bf1f" (UID: "85f0cdb4-6612-4d20-ab62-f7a82947bf1f"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.750513 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.750540 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.752997 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf" (OuterVolumeSpecName: "kube-api-access-k5pqf") pod "85f0cdb4-6612-4d20-ab62-f7a82947bf1f" (UID: "85f0cdb4-6612-4d20-ab62-f7a82947bf1f"). InnerVolumeSpecName "kube-api-access-k5pqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.753753 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z" (OuterVolumeSpecName: "kube-api-access-t2v9z") pod "9905111d-899a-490f-813a-027435cc85bf" (UID: "9905111d-899a-490f-813a-027435cc85bf"). InnerVolumeSpecName "kube-api-access-t2v9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.761123 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util" (OuterVolumeSpecName: "util") pod "9905111d-899a-490f-813a-027435cc85bf" (UID: "9905111d-899a-490f-813a-027435cc85bf"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.777073 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util" (OuterVolumeSpecName: "util") pod "85f0cdb4-6612-4d20-ab62-f7a82947bf1f" (UID: "85f0cdb4-6612-4d20-ab62-f7a82947bf1f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.851585 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.851862 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2v9z\" (UniqueName: \"kubernetes.io/projected/9905111d-899a-490f-813a-027435cc85bf-kube-api-access-t2v9z\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.851981 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5pqf\" (UniqueName: \"kubernetes.io/projected/85f0cdb4-6612-4d20-ab62-f7a82947bf1f-kube-api-access-k5pqf\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:11 crc kubenswrapper[4702]: I1124 18:04:11.852090 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9905111d-899a-490f-813a-027435cc85bf-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.353201 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.353167 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd" event={"ID":"9905111d-899a-490f-813a-027435cc85bf","Type":"ContainerDied","Data":"e1419690ea5a3b7c6e66ecbbf89a85f2c2aa6fa1319b540c89ac262522e626c5"} Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.353641 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1419690ea5a3b7c6e66ecbbf89a85f2c2aa6fa1319b540c89ac262522e626c5" Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.355583 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" event={"ID":"85f0cdb4-6612-4d20-ab62-f7a82947bf1f","Type":"ContainerDied","Data":"94c8cfb7d71b1a8125ddd0bc7f18305725e4d50ea66b811f96eb05e795251f96"} Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.355620 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94c8cfb7d71b1a8125ddd0bc7f18305725e4d50ea66b811f96eb05e795251f96" Nov 24 18:04:12 crc kubenswrapper[4702]: I1124 18:04:12.355691 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd" Nov 24 18:04:12 crc kubenswrapper[4702]: E1124 18:04:12.453992 4702 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85f0cdb4_6612_4d20_ab62_f7a82947bf1f.slice/crio-94c8cfb7d71b1a8125ddd0bc7f18305725e4d50ea66b811f96eb05e795251f96\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9905111d_899a_490f_813a_027435cc85bf.slice/crio-e1419690ea5a3b7c6e66ecbbf89a85f2c2aa6fa1319b540c89ac262522e626c5\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9905111d_899a_490f_813a_027435cc85bf.slice\": RecentStats: unable to find data in memory cache]" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646054 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz"] Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646789 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="pull" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646847 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="pull" Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646857 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646864 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646883 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="util" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646889 4702 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="util" Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646897 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646903 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646913 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="util" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646918 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="util" Nov 24 18:04:21 crc kubenswrapper[4702]: E1124 18:04:21.646928 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="pull" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.646934 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="pull" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.647042 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="85f0cdb4-6612-4d20-ab62-f7a82947bf1f" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.647051 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="9905111d-899a-490f-813a-027435cc85bf" containerName="extract" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.647899 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.658922 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-mjpms" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.659585 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-service-cert" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.671943 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz"] Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.796102 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvd8h\" (UniqueName: \"kubernetes.io/projected/9637afb3-11e9-4870-a0c6-564bb3983c36-kube-api-access-fvd8h\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.796643 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-webhook-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.796768 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-apiservice-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.898443 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-apiservice-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.898527 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvd8h\" (UniqueName: \"kubernetes.io/projected/9637afb3-11e9-4870-a0c6-564bb3983c36-kube-api-access-fvd8h\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.898571 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-webhook-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.904224 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-webhook-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.904726 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9637afb3-11e9-4870-a0c6-564bb3983c36-apiservice-cert\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.914360 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvd8h\" (UniqueName: \"kubernetes.io/projected/9637afb3-11e9-4870-a0c6-564bb3983c36-kube-api-access-fvd8h\") pod \"swift-operator-controller-manager-bfd5974c7-qr9hz\" (UID: \"9637afb3-11e9-4870-a0c6-564bb3983c36\") " pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:21 crc kubenswrapper[4702]: I1124 18:04:21.975574 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.176128 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz"] Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.414772 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" event={"ID":"9637afb3-11e9-4870-a0c6-564bb3983c36","Type":"ContainerStarted","Data":"1095248832dd723c9ac26273c7e452dacfae7598bb17853ca9d78d8897ba72f3"} Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.484313 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.484407 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.484486 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.485291 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:04:22 crc kubenswrapper[4702]: I1124 18:04:22.485386 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c" gracePeriod=600 Nov 24 18:04:22 crc kubenswrapper[4702]: E1124 18:04:22.613397 4702 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a77fa32_4f49_4b02_ac4a_fbad4d33e499.slice/crio-4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c.scope\": RecentStats: unable to find data in memory cache]" Nov 24 18:04:23 crc kubenswrapper[4702]: I1124 18:04:23.424142 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c" exitCode=0 Nov 24 18:04:23 crc kubenswrapper[4702]: I1124 18:04:23.424208 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c"} Nov 24 18:04:23 crc kubenswrapper[4702]: I1124 18:04:23.425496 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29"} Nov 24 18:04:23 crc kubenswrapper[4702]: I1124 18:04:23.425584 4702 scope.go:117] "RemoveContainer" containerID="a76dadb7c1408f4ed37b1c1be9abc06e8184dfb47caaa5bb611f1fe3d9cdd32b" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.442121 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" event={"ID":"9637afb3-11e9-4870-a0c6-564bb3983c36","Type":"ContainerStarted","Data":"4b5bdb1413e49c214916f670ba6d7441347c22ec249d44a57d2e7a4a9a03f2c9"} Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.442709 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" event={"ID":"9637afb3-11e9-4870-a0c6-564bb3983c36","Type":"ContainerStarted","Data":"7025f1ddec48235c70c44b3e45a763ebda2e287ae533178185be4a1ef60a9576"} Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.442734 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.463019 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" podStartSLOduration=2.087447457 podStartE2EDuration="4.463001955s" podCreationTimestamp="2025-11-24 18:04:21 +0000 UTC" firstStartedPulling="2025-11-24 18:04:22.191015951 +0000 UTC m=+951.431757115" lastFinishedPulling="2025-11-24 18:04:24.566570449 +0000 UTC m=+953.807311613" observedRunningTime="2025-11-24 18:04:25.46245328 +0000 UTC m=+954.703194444" watchObservedRunningTime="2025-11-24 18:04:25.463001955 +0000 UTC m=+954.703743119" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.625174 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw"] Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.626536 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.628228 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-4p6qj" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.629203 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-service-cert" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.637493 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw"] Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.758514 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-apiservice-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.758556 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-webhook-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.758575 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8khd\" (UniqueName: \"kubernetes.io/projected/b12d50df-848e-49f1-800c-316f9339557b-kube-api-access-b8khd\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.860392 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-apiservice-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.860629 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-webhook-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.860700 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8khd\" (UniqueName: \"kubernetes.io/projected/b12d50df-848e-49f1-800c-316f9339557b-kube-api-access-b8khd\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.866377 4702 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-webhook-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.866385 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b12d50df-848e-49f1-800c-316f9339557b-apiservice-cert\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.875401 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8khd\" (UniqueName: \"kubernetes.io/projected/b12d50df-848e-49f1-800c-316f9339557b-kube-api-access-b8khd\") pod \"horizon-operator-controller-manager-79b8cddcd-6x5pw\" (UID: \"b12d50df-848e-49f1-800c-316f9339557b\") " pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:25 crc kubenswrapper[4702]: I1124 18:04:25.944000 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:26 crc kubenswrapper[4702]: I1124 18:04:26.350068 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw"] Nov 24 18:04:26 crc kubenswrapper[4702]: W1124 18:04:26.353372 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb12d50df_848e_49f1_800c_316f9339557b.slice/crio-68f9a4e08243ceecc0cb3c47dcf1401e989e9c8282f77638cdaefa3e8b08e0e4 WatchSource:0}: Error finding container 68f9a4e08243ceecc0cb3c47dcf1401e989e9c8282f77638cdaefa3e8b08e0e4: Status 404 returned error can't find the container with id 68f9a4e08243ceecc0cb3c47dcf1401e989e9c8282f77638cdaefa3e8b08e0e4 Nov 24 18:04:26 crc kubenswrapper[4702]: I1124 18:04:26.448245 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" event={"ID":"b12d50df-848e-49f1-800c-316f9339557b","Type":"ContainerStarted","Data":"68f9a4e08243ceecc0cb3c47dcf1401e989e9c8282f77638cdaefa3e8b08e0e4"} Nov 24 18:04:28 crc kubenswrapper[4702]: I1124 18:04:28.280364 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/keystone-5d44bcfcdf-szfx6" Nov 24 18:04:28 crc kubenswrapper[4702]: I1124 18:04:28.461068 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" event={"ID":"b12d50df-848e-49f1-800c-316f9339557b","Type":"ContainerStarted","Data":"1d20001f3aa7aa921ff2a97133e5b702e230bc581df61cbfb3b7d70fee9ef67a"} Nov 24 18:04:28 crc kubenswrapper[4702]: I1124 18:04:28.461424 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" event={"ID":"b12d50df-848e-49f1-800c-316f9339557b","Type":"ContainerStarted","Data":"79d295757a1ddc1d149b4abb292094b3b84477e76773b253401cb972d8349d4f"} Nov 24 18:04:28 crc kubenswrapper[4702]: I1124 18:04:28.461449 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:31 crc kubenswrapper[4702]: I1124 18:04:31.981381 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-bfd5974c7-qr9hz" Nov 24 18:04:32 crc kubenswrapper[4702]: I1124 18:04:32.012732 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" podStartSLOduration=5.5108054939999995 podStartE2EDuration="7.012714808s" podCreationTimestamp="2025-11-24 18:04:25 +0000 UTC" firstStartedPulling="2025-11-24 18:04:26.355387463 +0000 UTC m=+955.596128627" lastFinishedPulling="2025-11-24 18:04:27.857296767 +0000 UTC m=+957.098037941" observedRunningTime="2025-11-24 18:04:28.478162979 +0000 UTC m=+957.718904153" watchObservedRunningTime="2025-11-24 18:04:32.012714808 +0000 UTC m=+961.253455972" Nov 24 18:04:35 crc kubenswrapper[4702]: I1124 18:04:35.852581 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:35 crc kubenswrapper[4702]: I1124 18:04:35.853939 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:35 crc kubenswrapper[4702]: I1124 18:04:35.858764 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-index-dockercfg-mqm5c" Nov 24 18:04:35 crc kubenswrapper[4702]: I1124 18:04:35.864726 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:35 crc kubenswrapper[4702]: I1124 18:04:35.949204 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-79b8cddcd-6x5pw" Nov 24 18:04:36 crc kubenswrapper[4702]: I1124 18:04:36.008610 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvj5c\" (UniqueName: \"kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c\") pod \"glance-operator-index-p7825\" (UID: \"4f728705-13d8-4833-bd87-9e9174b3f320\") " pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:36 crc kubenswrapper[4702]: I1124 18:04:36.109806 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvj5c\" (UniqueName: \"kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c\") pod \"glance-operator-index-p7825\" (UID: \"4f728705-13d8-4833-bd87-9e9174b3f320\") " pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:36 crc kubenswrapper[4702]: I1124 18:04:36.139648 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvj5c\" (UniqueName: \"kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c\") pod \"glance-operator-index-p7825\" (UID: \"4f728705-13d8-4833-bd87-9e9174b3f320\") " pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:36 crc kubenswrapper[4702]: I1124 18:04:36.180736 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:36 crc kubenswrapper[4702]: I1124 18:04:36.562623 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:36 crc kubenswrapper[4702]: W1124 18:04:36.567099 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f728705_13d8_4833_bd87_9e9174b3f320.slice/crio-d9aba1b17ebee44c6ecb13e340bc2e773044f838d0d4d0e6a4edb2828dea1e07 WatchSource:0}: Error finding container d9aba1b17ebee44c6ecb13e340bc2e773044f838d0d4d0e6a4edb2828dea1e07: Status 404 returned error can't find the container with id d9aba1b17ebee44c6ecb13e340bc2e773044f838d0d4d0e6a4edb2828dea1e07 Nov 24 18:04:37 crc kubenswrapper[4702]: I1124 18:04:37.521371 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-p7825" event={"ID":"4f728705-13d8-4833-bd87-9e9174b3f320","Type":"ContainerStarted","Data":"d9aba1b17ebee44c6ecb13e340bc2e773044f838d0d4d0e6a4edb2828dea1e07"} Nov 24 18:04:38 crc kubenswrapper[4702]: I1124 18:04:38.528087 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-p7825" event={"ID":"4f728705-13d8-4833-bd87-9e9174b3f320","Type":"ContainerStarted","Data":"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0"} Nov 24 18:04:38 crc kubenswrapper[4702]: I1124 18:04:38.541007 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-index-p7825" podStartSLOduration=2.064753818 podStartE2EDuration="3.540987221s" podCreationTimestamp="2025-11-24 18:04:35 +0000 UTC" firstStartedPulling="2025-11-24 18:04:36.568764825 +0000 UTC m=+965.809505989" lastFinishedPulling="2025-11-24 18:04:38.044998228 +0000 UTC m=+967.285739392" observedRunningTime="2025-11-24 18:04:38.540727354 +0000 UTC m=+967.781468538" watchObservedRunningTime="2025-11-24 18:04:38.540987221 +0000 UTC m=+967.781728385" Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.044597 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.544085 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/glance-operator-index-p7825" podUID="4f728705-13d8-4833-bd87-9e9174b3f320" containerName="registry-server" containerID="cri-o://496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0" gracePeriod=2 Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.653971 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-index-lgh2x"] Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.654791 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.662951 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-lgh2x"] Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.768904 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czfc6\" (UniqueName: \"kubernetes.io/projected/71579ed0-43ea-4a86-a7dd-f7ab0351a1c0-kube-api-access-czfc6\") pod \"glance-operator-index-lgh2x\" (UID: \"71579ed0-43ea-4a86-a7dd-f7ab0351a1c0\") " pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.870299 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czfc6\" (UniqueName: \"kubernetes.io/projected/71579ed0-43ea-4a86-a7dd-f7ab0351a1c0-kube-api-access-czfc6\") pod \"glance-operator-index-lgh2x\" (UID: \"71579ed0-43ea-4a86-a7dd-f7ab0351a1c0\") " pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:40 crc kubenswrapper[4702]: I1124 18:04:40.889329 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czfc6\" (UniqueName: \"kubernetes.io/projected/71579ed0-43ea-4a86-a7dd-f7ab0351a1c0-kube-api-access-czfc6\") pod \"glance-operator-index-lgh2x\" (UID: \"71579ed0-43ea-4a86-a7dd-f7ab0351a1c0\") " pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.066886 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.421790 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.456863 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-lgh2x"] Nov 24 18:04:41 crc kubenswrapper[4702]: W1124 18:04:41.461284 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71579ed0_43ea_4a86_a7dd_f7ab0351a1c0.slice/crio-aca660f983b6ff7d32a7a61c3f64cb822a95aac64adbb757d763b9eca93d5763 WatchSource:0}: Error finding container aca660f983b6ff7d32a7a61c3f64cb822a95aac64adbb757d763b9eca93d5763: Status 404 returned error can't find the container with id aca660f983b6ff7d32a7a61c3f64cb822a95aac64adbb757d763b9eca93d5763 Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.554794 4702 generic.go:334] "Generic (PLEG): container finished" podID="4f728705-13d8-4833-bd87-9e9174b3f320" containerID="496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0" exitCode=0 Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.555008 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-index-p7825" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.554998 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-p7825" event={"ID":"4f728705-13d8-4833-bd87-9e9174b3f320","Type":"ContainerDied","Data":"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0"} Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.555074 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-p7825" event={"ID":"4f728705-13d8-4833-bd87-9e9174b3f320","Type":"ContainerDied","Data":"d9aba1b17ebee44c6ecb13e340bc2e773044f838d0d4d0e6a4edb2828dea1e07"} Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.555101 4702 scope.go:117] "RemoveContainer" containerID="496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.556937 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-lgh2x" event={"ID":"71579ed0-43ea-4a86-a7dd-f7ab0351a1c0","Type":"ContainerStarted","Data":"aca660f983b6ff7d32a7a61c3f64cb822a95aac64adbb757d763b9eca93d5763"} Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.571078 4702 scope.go:117] "RemoveContainer" containerID="496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0" Nov 24 18:04:41 crc kubenswrapper[4702]: E1124 18:04:41.571901 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0\": container with ID starting with 496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0 not found: ID does not exist" containerID="496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.571951 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0"} err="failed to get container status \"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0\": rpc error: code = NotFound desc = could not find container \"496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0\": container with ID starting with 496835c25e44b46fe7bba6fdaacc894f8ded3830745391ca446e210eb44826f0 not found: ID does not exist" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.581156 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvj5c\" (UniqueName: \"kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c\") pod \"4f728705-13d8-4833-bd87-9e9174b3f320\" (UID: \"4f728705-13d8-4833-bd87-9e9174b3f320\") " Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.586319 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c" (OuterVolumeSpecName: "kube-api-access-lvj5c") pod "4f728705-13d8-4833-bd87-9e9174b3f320" (UID: "4f728705-13d8-4833-bd87-9e9174b3f320"). InnerVolumeSpecName "kube-api-access-lvj5c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.682606 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvj5c\" (UniqueName: \"kubernetes.io/projected/4f728705-13d8-4833-bd87-9e9174b3f320-kube-api-access-lvj5c\") on node \"crc\" DevicePath \"\"" Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.871388 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:41 crc kubenswrapper[4702]: I1124 18:04:41.885783 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/glance-operator-index-p7825"] Nov 24 18:04:42 crc kubenswrapper[4702]: I1124 18:04:42.566382 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-lgh2x" event={"ID":"71579ed0-43ea-4a86-a7dd-f7ab0351a1c0","Type":"ContainerStarted","Data":"fbe8e871b55dd24d7af3b712c158730768f0c3662b5f080ad17c38fa23501292"} Nov 24 18:04:42 crc kubenswrapper[4702]: I1124 18:04:42.582013 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-index-lgh2x" podStartSLOduration=2.5304075470000003 podStartE2EDuration="2.581996074s" podCreationTimestamp="2025-11-24 18:04:40 +0000 UTC" firstStartedPulling="2025-11-24 18:04:41.463197496 +0000 UTC m=+970.703938650" lastFinishedPulling="2025-11-24 18:04:41.514786013 +0000 UTC m=+970.755527177" observedRunningTime="2025-11-24 18:04:42.580107513 +0000 UTC m=+971.820848677" watchObservedRunningTime="2025-11-24 18:04:42.581996074 +0000 UTC m=+971.822737238" Nov 24 18:04:43 crc kubenswrapper[4702]: I1124 18:04:43.657507 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f728705-13d8-4833-bd87-9e9174b3f320" path="/var/lib/kubelet/pods/4f728705-13d8-4833-bd87-9e9174b3f320/volumes" Nov 24 18:04:51 crc kubenswrapper[4702]: I1124 18:04:51.067956 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:51 crc kubenswrapper[4702]: I1124 18:04:51.068447 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:51 crc kubenswrapper[4702]: I1124 18:04:51.091783 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:51 crc kubenswrapper[4702]: I1124 18:04:51.646269 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-index-lgh2x" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.898158 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Nov 24 18:04:54 crc kubenswrapper[4702]: E1124 18:04:54.898680 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f728705-13d8-4833-bd87-9e9174b3f320" containerName="registry-server" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.898696 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f728705-13d8-4833-bd87-9e9174b3f320" containerName="registry-server" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.898854 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f728705-13d8-4833-bd87-9e9174b3f320" containerName="registry-server" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.908222 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.911962 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-conf" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.912728 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-storage-config-data" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.915046 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-swift-dockercfg-kvg8p" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.915534 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-files" Nov 24 18:04:54 crc kubenswrapper[4702]: I1124 18:04:54.923328 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.066132 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-lock\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.066205 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lq6vx\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-kube-api-access-lq6vx\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.066233 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.066260 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-cache\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.066316 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.081572 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m"] Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.083021 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.085883 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n5krf" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.092592 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m"] Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168116 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpsx4\" (UniqueName: \"kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168189 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168238 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168260 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-lock\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168285 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168307 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lq6vx\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-kube-api-access-lq6vx\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168326 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168352 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" 
(UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-cache\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.168790 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.168846 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.168918 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. No retries permitted until 2025-11-24 18:04:55.6688971 +0000 UTC m=+984.909638284 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.168991 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") device mount path \"/mnt/openstack/pv03\"" pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.169213 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-cache\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.169377 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-lock\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.186995 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lq6vx\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-kube-api-access-lq6vx\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.196385 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.269179 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.269235 4702 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.269292 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpsx4\" (UniqueName: \"kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.270263 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.270276 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.286584 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpsx4\" (UniqueName: \"kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4\") pod \"5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.397052 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.676065 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.676264 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.676278 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: E1124 18:04:55.676378 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. 
No retries permitted until 2025-11-24 18:04:56.676362369 +0000 UTC m=+985.917103523 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:04:55 crc kubenswrapper[4702]: I1124 18:04:55.810680 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m"] Nov 24 18:04:56 crc kubenswrapper[4702]: I1124 18:04:56.651282 4702 generic.go:334] "Generic (PLEG): container finished" podID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerID="87db81b820c5da2f2b859db027b843eb2f2a5f252e97dbd23bcc753ec1da593b" exitCode=0 Nov 24 18:04:56 crc kubenswrapper[4702]: I1124 18:04:56.651326 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" event={"ID":"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd","Type":"ContainerDied","Data":"87db81b820c5da2f2b859db027b843eb2f2a5f252e97dbd23bcc753ec1da593b"} Nov 24 18:04:56 crc kubenswrapper[4702]: I1124 18:04:56.651535 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" event={"ID":"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd","Type":"ContainerStarted","Data":"171934ac550ec828b04dcf9129f32f0064a3e8660e12dd5916491a75712070a6"} Nov 24 18:04:56 crc kubenswrapper[4702]: I1124 18:04:56.690013 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:56 crc kubenswrapper[4702]: E1124 18:04:56.690186 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:04:56 crc kubenswrapper[4702]: E1124 18:04:56.690204 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:04:56 crc kubenswrapper[4702]: E1124 18:04:56.690256 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. No retries permitted until 2025-11-24 18:04:58.69024125 +0000 UTC m=+987.930982404 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:04:57 crc kubenswrapper[4702]: I1124 18:04:57.657916 4702 generic.go:334] "Generic (PLEG): container finished" podID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerID="3240b17132ca7dc158856497668d0b4c160c4e13c6222f325f6260113bcb6436" exitCode=0 Nov 24 18:04:57 crc kubenswrapper[4702]: I1124 18:04:57.658319 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" event={"ID":"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd","Type":"ContainerDied","Data":"3240b17132ca7dc158856497668d0b4c160c4e13c6222f325f6260113bcb6436"} Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.187386 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-pkkdm"] Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.188406 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.190186 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-config-data" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.190310 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-proxy-config-data" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.190522 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-scripts" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.219398 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-pkkdm"] Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.314721 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.314839 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4222l\" (UniqueName: \"kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.314879 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.314906 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " 
pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.315122 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.315253 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416447 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416524 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4222l\" (UniqueName: \"kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416550 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416564 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416587 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416614 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.416954 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " 
pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.417457 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.417628 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.422606 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.422682 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.433623 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4222l\" (UniqueName: \"kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l\") pod \"swift-ring-rebalance-pkkdm\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.515783 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.679722 4702 generic.go:334] "Generic (PLEG): container finished" podID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerID="f130384437d73e8495b69cf14eb04f3aba6d5a635976f90b22492b85ff48953d" exitCode=0 Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.679768 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" event={"ID":"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd","Type":"ContainerDied","Data":"f130384437d73e8495b69cf14eb04f3aba6d5a635976f90b22492b85ff48953d"} Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.721890 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:04:58 crc kubenswrapper[4702]: E1124 18:04:58.722057 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:04:58 crc kubenswrapper[4702]: E1124 18:04:58.722071 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:04:58 crc kubenswrapper[4702]: E1124 18:04:58.722118 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. No retries permitted until 2025-11-24 18:05:02.722103288 +0000 UTC m=+991.962844452 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:04:58 crc kubenswrapper[4702]: I1124 18:04:58.910014 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-pkkdm"] Nov 24 18:04:58 crc kubenswrapper[4702]: W1124 18:04:58.914984 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14a2829d_0ef6_4544_aae9_f4bd9ddb061d.slice/crio-d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0 WatchSource:0}: Error finding container d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0: Status 404 returned error can't find the container with id d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0 Nov 24 18:04:59 crc kubenswrapper[4702]: I1124 18:04:59.689847 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" event={"ID":"14a2829d-0ef6-4544-aae9-f4bd9ddb061d","Type":"ContainerStarted","Data":"d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0"} Nov 24 18:04:59 crc kubenswrapper[4702]: I1124 18:04:59.977873 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.038660 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle\") pod \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.038755 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpsx4\" (UniqueName: \"kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4\") pod \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.038785 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util\") pod \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\" (UID: \"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd\") " Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.040356 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle" (OuterVolumeSpecName: "bundle") pod "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" (UID: "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.044467 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4" (OuterVolumeSpecName: "kube-api-access-lpsx4") pod "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" (UID: "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd"). InnerVolumeSpecName "kube-api-access-lpsx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.053857 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util" (OuterVolumeSpecName: "util") pod "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" (UID: "4181b945-25f4-44ce-8bf9-f9fd2b0b61fd"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.141151 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpsx4\" (UniqueName: \"kubernetes.io/projected/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-kube-api-access-lpsx4\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.141200 4702 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-util\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.141213 4702 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4181b945-25f4-44ce-8bf9-f9fd2b0b61fd-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.699792 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.699759 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m" event={"ID":"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd","Type":"ContainerDied","Data":"171934ac550ec828b04dcf9129f32f0064a3e8660e12dd5916491a75712070a6"} Nov 24 18:05:00 crc kubenswrapper[4702]: I1124 18:05:00.699852 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="171934ac550ec828b04dcf9129f32f0064a3e8660e12dd5916491a75712070a6" Nov 24 18:05:02 crc kubenswrapper[4702]: I1124 18:05:02.782555 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:05:02 crc kubenswrapper[4702]: E1124 18:05:02.782692 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:05:02 crc kubenswrapper[4702]: E1124 18:05:02.782878 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:05:02 crc kubenswrapper[4702]: E1124 18:05:02.782927 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. No retries permitted until 2025-11-24 18:05:10.782909539 +0000 UTC m=+1000.023650703 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:05:07 crc kubenswrapper[4702]: I1124 18:05:07.745114 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" event={"ID":"14a2829d-0ef6-4544-aae9-f4bd9ddb061d","Type":"ContainerStarted","Data":"64e02f6a8617e119eb4587f5c258e4285d757890c1106b3f00c13469e9d87104"} Nov 24 18:05:07 crc kubenswrapper[4702]: I1124 18:05:07.777228 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" podStartSLOduration=1.485717444 podStartE2EDuration="9.777202957s" podCreationTimestamp="2025-11-24 18:04:58 +0000 UTC" firstStartedPulling="2025-11-24 18:04:58.9173385 +0000 UTC m=+988.158079664" lastFinishedPulling="2025-11-24 18:05:07.208824013 +0000 UTC m=+996.449565177" observedRunningTime="2025-11-24 18:05:07.770229205 +0000 UTC m=+997.010970369" watchObservedRunningTime="2025-11-24 18:05:07.777202957 +0000 UTC m=+997.017944121" Nov 24 18:05:10 crc kubenswrapper[4702]: I1124 18:05:10.795399 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:05:10 crc kubenswrapper[4702]: E1124 18:05:10.797477 4702 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Nov 24 18:05:10 crc kubenswrapper[4702]: E1124 18:05:10.797664 4702 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Nov 24 18:05:10 crc kubenswrapper[4702]: E1124 18:05:10.797917 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift podName:1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb nodeName:}" failed. No retries permitted until 2025-11-24 18:05:26.797882814 +0000 UTC m=+1016.038624018 (durationBeforeRetry 16s). 
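[The retry gaps for the failing etc-swift mount double each time — durationBeforeRetry 4s, then 8s, now 16s — which is the kubelet's per-operation exponential backoff in nestedpendingoperations. A sketch of such a schedule; the initial delay and ceiling below are assumptions for illustration, not values read from this log:

```go
// Doubling retry schedule matching the observed 4s -> 8s -> 16s progression.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond           // assumed initial backoff
	maxDelay := 2*time.Minute + 2*time.Second // assumed ceiling
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: retry after %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```
]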
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift") pod "swift-storage-0" (UID: "1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb") : configmap "swift-ring-files" not found Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.586300 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-d47996487-cmbrh"] Nov 24 18:05:11 crc kubenswrapper[4702]: E1124 18:05:11.586633 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="pull" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.586656 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="pull" Nov 24 18:05:11 crc kubenswrapper[4702]: E1124 18:05:11.586675 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="util" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.586683 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="util" Nov 24 18:05:11 crc kubenswrapper[4702]: E1124 18:05:11.586720 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="extract" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.586728 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="extract" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.586908 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4181b945-25f4-44ce-8bf9-f9fd2b0b61fd" containerName="extract" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.587436 4702 util.go:30] "No sandbox for pod can be found. 
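[When the finished bundle pod is finally pruned, the CPU and memory managers drop their per-container bookkeeping; the paired lines above (cpu_manager RemoveStaleState, state_mem Deleted CPUSet assignment, memory_manager RemoveStaleState) are one cleanup reported from three places. A simplified sketch of that state pruning — not kubelet source:

```go
// Drops per-container resource assignments for pods that no longer exist,
// mirroring the RemoveStaleState lines above.
package main

import "fmt"

type key struct{ podUID, container string }

type stateMem struct{ assignments map[key]string }

func (s *stateMem) removeStale(livePods map[string]bool) {
	for k := range s.assignments {
		if !livePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(s.assignments, k) // deleting during range is safe in Go
		}
	}
}

func main() {
	s := &stateMem{assignments: map[key]string{
		{"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd", "pull"}:    "cpuset-a",
		{"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd", "util"}:    "cpuset-b",
		{"4181b945-25f4-44ce-8bf9-f9fd2b0b61fd", "extract"}: "cpuset-c",
	}}
	s.removeStale(map[string]bool{}) // the bundle pod is gone
}
```
]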
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.589722 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-service-cert" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.590120 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-sggcw" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.601458 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-d47996487-cmbrh"] Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.709225 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-apiservice-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.709320 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsztw\" (UniqueName: \"kubernetes.io/projected/ba679587-111b-43ee-bde5-a810fb5f605e-kube-api-access-rsztw\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.709409 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-webhook-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.810476 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-apiservice-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.810585 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsztw\" (UniqueName: \"kubernetes.io/projected/ba679587-111b-43ee-bde5-a810fb5f605e-kube-api-access-rsztw\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.810643 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-webhook-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.817479 4702 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-apiservice-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.824402 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ba679587-111b-43ee-bde5-a810fb5f605e-webhook-cert\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.833362 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsztw\" (UniqueName: \"kubernetes.io/projected/ba679587-111b-43ee-bde5-a810fb5f605e-kube-api-access-rsztw\") pod \"glance-operator-controller-manager-d47996487-cmbrh\" (UID: \"ba679587-111b-43ee-bde5-a810fb5f605e\") " pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:11 crc kubenswrapper[4702]: I1124 18:05:11.908331 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:12 crc kubenswrapper[4702]: I1124 18:05:12.157588 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-d47996487-cmbrh"] Nov 24 18:05:12 crc kubenswrapper[4702]: I1124 18:05:12.774719 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" event={"ID":"ba679587-111b-43ee-bde5-a810fb5f605e","Type":"ContainerStarted","Data":"e147858275d0607eb081a36104a91e1916508e0a3109c02fb3829e178d71ae5c"} Nov 24 18:05:14 crc kubenswrapper[4702]: I1124 18:05:14.789633 4702 generic.go:334] "Generic (PLEG): container finished" podID="14a2829d-0ef6-4544-aae9-f4bd9ddb061d" containerID="64e02f6a8617e119eb4587f5c258e4285d757890c1106b3f00c13469e9d87104" exitCode=0 Nov 24 18:05:14 crc kubenswrapper[4702]: I1124 18:05:14.789716 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" event={"ID":"14a2829d-0ef6-4544-aae9-f4bd9ddb061d","Type":"ContainerDied","Data":"64e02f6a8617e119eb4587f5c258e4285d757890c1106b3f00c13469e9d87104"} Nov 24 18:05:14 crc kubenswrapper[4702]: I1124 18:05:14.792058 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" event={"ID":"ba679587-111b-43ee-bde5-a810fb5f605e","Type":"ContainerStarted","Data":"1bc5a0b305b6c4d605e5bfb24effc7f5f1d7e3576359531f48a358762e1de2bf"} Nov 24 18:05:14 crc kubenswrapper[4702]: I1124 18:05:14.792369 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:14 crc kubenswrapper[4702]: I1124 18:05:14.827743 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" podStartSLOduration=2.02518856 podStartE2EDuration="3.827726109s" podCreationTimestamp="2025-11-24 18:05:11 +0000 UTC" firstStartedPulling="2025-11-24 18:05:12.164982329 +0000 UTC m=+1001.405723493" lastFinishedPulling="2025-11-24 18:05:13.967519878 +0000 
UTC m=+1003.208261042" observedRunningTime="2025-11-24 18:05:14.823269962 +0000 UTC m=+1004.064011146" watchObservedRunningTime="2025-11-24 18:05:14.827726109 +0000 UTC m=+1004.068467273" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.041558 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182204 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182330 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182379 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182471 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4222l\" (UniqueName: \"kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182525 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.182603 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts\") pod \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\" (UID: \"14a2829d-0ef6-4544-aae9-f4bd9ddb061d\") " Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.183348 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.183677 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "etc-swift". 
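[The startup-latency tracker lines are internally consistent: podStartSLOduration is the end-to-end startup time minus the image-pull window. For glance-operator-controller-manager above, 3.827726109s − (18:05:13.967519878 − 18:05:12.164982329) = 2.025188560s, matching the logged SLO value; the rebalance pod's 9.777s/1.486s pair checks out the same way. The arithmetic, for the record:

```go
// Recomputes podStartSLOduration from the tracker line above:
// SLO = E2E duration minus image-pull time (inferred from the logged numbers).
package main

import (
	"fmt"
	"time"
)

func main() {
	e2e := 3827726109 * time.Nanosecond // podStartE2EDuration="3.827726109s"
	firstPull := time.Date(2025, 11, 24, 18, 5, 12, 164982329, time.UTC)
	lastPull := time.Date(2025, 11, 24, 18, 5, 13, 967519878, time.UTC)
	fmt.Println(e2e - lastPull.Sub(firstPull)) // 2.02518856s == podStartSLOduration
}
```
]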
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.188431 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l" (OuterVolumeSpecName: "kube-api-access-4222l") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "kube-api-access-4222l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.191938 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.199536 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts" (OuterVolumeSpecName: "scripts") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.207004 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "14a2829d-0ef6-4544-aae9-f4bd9ddb061d" (UID: "14a2829d-0ef6-4544-aae9-f4bd9ddb061d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284333 4702 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284379 4702 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284391 4702 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284410 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4222l\" (UniqueName: \"kubernetes.io/projected/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-kube-api-access-4222l\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284424 4702 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.284435 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14a2829d-0ef6-4544-aae9-f4bd9ddb061d-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.806365 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" 
event={"ID":"14a2829d-0ef6-4544-aae9-f4bd9ddb061d","Type":"ContainerDied","Data":"d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0"} Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.806643 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1aee392db999f468df8a7d96f6807d6a7f5f0609de3eb324e77c1320b2daba0" Nov 24 18:05:16 crc kubenswrapper[4702]: I1124 18:05:16.806409 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-pkkdm" Nov 24 18:05:21 crc kubenswrapper[4702]: I1124 18:05:21.912626 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-d47996487-cmbrh" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.322201 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-qjpg4"] Nov 24 18:05:23 crc kubenswrapper[4702]: E1124 18:05:23.322459 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a2829d-0ef6-4544-aae9-f4bd9ddb061d" containerName="swift-ring-rebalance" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.322470 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a2829d-0ef6-4544-aae9-f4bd9ddb061d" containerName="swift-ring-rebalance" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.322601 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="14a2829d-0ef6-4544-aae9-f4bd9ddb061d" containerName="swift-ring-rebalance" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.323037 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.333689 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-7533-account-create-update-ckvrg"] Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.334529 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.336772 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.343226 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-qjpg4"] Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.407010 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstackclient"] Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.425237 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-7533-account-create-update-ckvrg"] Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.425374 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.447812 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"openstack-config-secret" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.447874 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-scripts-9db6gc427h" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.448475 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"default-dockercfg-2n7jz" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.448606 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-config" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.452674 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstackclient"] Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.486522 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blvww\" (UniqueName: \"kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.486602 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.486627 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfxtc\" (UniqueName: \"kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.486819 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587650 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587689 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-scripts\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587720 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-blvww\" (UniqueName: \"kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587756 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587773 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfxtc\" (UniqueName: \"kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587852 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config-secret\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587889 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w22pq\" (UniqueName: \"kubernetes.io/projected/82389de7-41e2-4820-97e4-bccaf40d3fd6-kube-api-access-w22pq\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.587949 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.588550 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.588549 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.610070 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfxtc\" (UniqueName: \"kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc\") pod \"glance-7533-account-create-update-ckvrg\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " 
pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.610108 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blvww\" (UniqueName: \"kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww\") pod \"glance-db-create-qjpg4\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.647408 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.659309 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.689070 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config-secret\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.689151 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w22pq\" (UniqueName: \"kubernetes.io/projected/82389de7-41e2-4820-97e4-bccaf40d3fd6-kube-api-access-w22pq\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.689236 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.689252 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-scripts\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.693228 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config-secret\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.693828 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-scripts\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.694978 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82389de7-41e2-4820-97e4-bccaf40d3fd6-openstack-config\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.713947 
4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w22pq\" (UniqueName: \"kubernetes.io/projected/82389de7-41e2-4820-97e4-bccaf40d3fd6-kube-api-access-w22pq\") pod \"openstackclient\" (UID: \"82389de7-41e2-4820-97e4-bccaf40d3fd6\") " pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:23 crc kubenswrapper[4702]: I1124 18:05:23.761631 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstackclient" Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.077694 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-7533-account-create-update-ckvrg"] Nov 24 18:05:24 crc kubenswrapper[4702]: W1124 18:05:24.083986 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6bd3660_2698_4a4f_9ddb_9bdfbf314101.slice/crio-d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd WatchSource:0}: Error finding container d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd: Status 404 returned error can't find the container with id d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.120318 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-qjpg4"] Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.174929 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstackclient"] Nov 24 18:05:24 crc kubenswrapper[4702]: W1124 18:05:24.178608 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82389de7_41e2_4820_97e4_bccaf40d3fd6.slice/crio-1f80624c79917aa6041f09e1075e75617baf3a17bc5ccf60a8c22c1e02c469cb WatchSource:0}: Error finding container 1f80624c79917aa6041f09e1075e75617baf3a17bc5ccf60a8c22c1e02c469cb: Status 404 returned error can't find the container with id 1f80624c79917aa6041f09e1075e75617baf3a17bc5ccf60a8c22c1e02c469cb Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.857893 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstackclient" event={"ID":"82389de7-41e2-4820-97e4-bccaf40d3fd6","Type":"ContainerStarted","Data":"1f80624c79917aa6041f09e1075e75617baf3a17bc5ccf60a8c22c1e02c469cb"} Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.859286 4702 generic.go:334] "Generic (PLEG): container finished" podID="6f209779-5517-4a4a-80d4-da7223983dde" containerID="026e5276b3070dfe579cce4db53ef0fe5834c55aae623db888637cc4bcfd772d" exitCode=0 Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.859320 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-qjpg4" event={"ID":"6f209779-5517-4a4a-80d4-da7223983dde","Type":"ContainerDied","Data":"026e5276b3070dfe579cce4db53ef0fe5834c55aae623db888637cc4bcfd772d"} Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.859352 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-qjpg4" event={"ID":"6f209779-5517-4a4a-80d4-da7223983dde","Type":"ContainerStarted","Data":"84560b8555fe6eae15d52bc41433bd0e54735a005318dbb6064118501b8426e2"} Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.860663 4702 generic.go:334] "Generic (PLEG): container finished" podID="b6bd3660-2698-4a4f-9ddb-9bdfbf314101" containerID="3a4316293c2d9a765657478c2906e5a8682ce745225654810e6c8749b5eb4bcb" exitCode=0 Nov 24 
18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.860694 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" event={"ID":"b6bd3660-2698-4a4f-9ddb-9bdfbf314101","Type":"ContainerDied","Data":"3a4316293c2d9a765657478c2906e5a8682ce745225654810e6c8749b5eb4bcb"} Nov 24 18:05:24 crc kubenswrapper[4702]: I1124 18:05:24.860724 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" event={"ID":"b6bd3660-2698-4a4f-9ddb-9bdfbf314101","Type":"ContainerStarted","Data":"d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd"} Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.192411 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-proxy-547856594f-rvdc7"] Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.196939 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.207339 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-proxy-config-data" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.222764 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-proxy-547856594f-rvdc7"] Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.240888 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.246458 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334585 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfxtc\" (UniqueName: \"kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc\") pod \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334695 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts\") pod \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\" (UID: \"b6bd3660-2698-4a4f-9ddb-9bdfbf314101\") " Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334733 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts\") pod \"6f209779-5517-4a4a-80d4-da7223983dde\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334754 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blvww\" (UniqueName: \"kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww\") pod \"6f209779-5517-4a4a-80d4-da7223983dde\" (UID: \"6f209779-5517-4a4a-80d4-da7223983dde\") " Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334948 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2hpw\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-kube-api-access-w2hpw\") pod \"swift-proxy-547856594f-rvdc7\" 
(UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.334976 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-log-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.335001 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-etc-swift\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.335040 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-run-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.335090 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d65bf194-f05e-423a-a5b4-7acdce24e0c9-config-data\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.335942 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f209779-5517-4a4a-80d4-da7223983dde" (UID: "6f209779-5517-4a4a-80d4-da7223983dde"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.335992 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6bd3660-2698-4a4f-9ddb-9bdfbf314101" (UID: "b6bd3660-2698-4a4f-9ddb-9bdfbf314101"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.340018 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww" (OuterVolumeSpecName: "kube-api-access-blvww") pod "6f209779-5517-4a4a-80d4-da7223983dde" (UID: "6f209779-5517-4a4a-80d4-da7223983dde"). InnerVolumeSpecName "kube-api-access-blvww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.340066 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc" (OuterVolumeSpecName: "kube-api-access-tfxtc") pod "b6bd3660-2698-4a4f-9ddb-9bdfbf314101" (UID: "b6bd3660-2698-4a4f-9ddb-9bdfbf314101"). InnerVolumeSpecName "kube-api-access-tfxtc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437069 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2hpw\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-kube-api-access-w2hpw\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437143 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-log-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437175 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-etc-swift\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437221 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-run-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437295 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d65bf194-f05e-423a-a5b4-7acdce24e0c9-config-data\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437369 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfxtc\" (UniqueName: \"kubernetes.io/projected/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-kube-api-access-tfxtc\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437386 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6bd3660-2698-4a4f-9ddb-9bdfbf314101-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437397 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f209779-5517-4a4a-80d4-da7223983dde-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.437409 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blvww\" (UniqueName: \"kubernetes.io/projected/6f209779-5517-4a4a-80d4-da7223983dde-kube-api-access-blvww\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.438507 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-log-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: 
I1124 18:05:26.438876 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d65bf194-f05e-423a-a5b4-7acdce24e0c9-run-httpd\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.442946 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-etc-swift\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.443088 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d65bf194-f05e-423a-a5b4-7acdce24e0c9-config-data\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.454825 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2hpw\" (UniqueName: \"kubernetes.io/projected/d65bf194-f05e-423a-a5b4-7acdce24e0c9-kube-api-access-w2hpw\") pod \"swift-proxy-547856594f-rvdc7\" (UID: \"d65bf194-f05e-423a-a5b4-7acdce24e0c9\") " pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.563369 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.843746 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.849651 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb-etc-swift\") pod \"swift-storage-0\" (UID: \"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb\") " pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.875388 4702 util.go:48] "No ready sandbox for pod can be found. 
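[This is the resolution of the etc-swift stall: the rebalance job finished at 18:05:14, its ring files landed in the swift-ring-files ConfigMap, and the retry scheduled for 18:05:26.797 succeeds at 18:05:26.849 with no intervention — the backoff simply expired after the precondition was met. A test harness waiting on the same precondition might poll like this (a sketch; kuttl expresses such waits declaratively):

```go
// Sketch of waiting for the condition the kubelet retries on above:
// the swift-ring-files ConfigMap appearing. Illustrative helper, not kuttl.
package swiftwait

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// WaitForRingFiles polls until the ConfigMap exists or the timeout expires.
func WaitForRingFiles(ctx context.Context, cs kubernetes.Interface) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().ConfigMaps("glance-kuttl-tests").
				Get(ctx, "swift-ring-files", metav1.GetOptions{})
			return err == nil, nil // treat lookup errors as "not yet"
		})
}
```
]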
Need to start a new one" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.875386 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-7533-account-create-update-ckvrg" event={"ID":"b6bd3660-2698-4a4f-9ddb-9bdfbf314101","Type":"ContainerDied","Data":"d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd"} Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.875441 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d00fff3d15a27d844ab4cf0fd8dbc1284e4d350c3a9c2e1fdb9a9ad6bcdfcbbd" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.877055 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-qjpg4" event={"ID":"6f209779-5517-4a4a-80d4-da7223983dde","Type":"ContainerDied","Data":"84560b8555fe6eae15d52bc41433bd0e54735a005318dbb6064118501b8426e2"} Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.877101 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84560b8555fe6eae15d52bc41433bd0e54735a005318dbb6064118501b8426e2" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.877133 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-qjpg4" Nov 24 18:05:26 crc kubenswrapper[4702]: I1124 18:05:26.974562 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-proxy-547856594f-rvdc7"] Nov 24 18:05:26 crc kubenswrapper[4702]: W1124 18:05:26.980051 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd65bf194_f05e_423a_a5b4_7acdce24e0c9.slice/crio-78382acb058788fee0da4822f24499301b3bc93f2b045b78498603b39d68465c WatchSource:0}: Error finding container 78382acb058788fee0da4822f24499301b3bc93f2b045b78498603b39d68465c: Status 404 returned error can't find the container with id 78382acb058788fee0da4822f24499301b3bc93f2b045b78498603b39d68465c Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.035108 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-storage-0" Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.486024 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Nov 24 18:05:27 crc kubenswrapper[4702]: W1124 18:05:27.494874 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1dd79ba8_8f74_4d74_a1a7_86b0dcb3a0cb.slice/crio-3715a453d7d18601b5daa90e7ee42884998068cbc7b42c9284d2cb90bcbbc578 WatchSource:0}: Error finding container 3715a453d7d18601b5daa90e7ee42884998068cbc7b42c9284d2cb90bcbbc578: Status 404 returned error can't find the container with id 3715a453d7d18601b5daa90e7ee42884998068cbc7b42c9284d2cb90bcbbc578 Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.888492 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" event={"ID":"d65bf194-f05e-423a-a5b4-7acdce24e0c9","Type":"ContainerStarted","Data":"b14fd02a405954b82dc3f4cf4d09faa7c6a29bcc975cad4676a1e37ea009fcc4"} Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.888541 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" event={"ID":"d65bf194-f05e-423a-a5b4-7acdce24e0c9","Type":"ContainerStarted","Data":"ae38a663584f1d9a1dbcaedeb8fae2910d2fd77e271063d373cdc8cff685df74"} Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.888554 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" event={"ID":"d65bf194-f05e-423a-a5b4-7acdce24e0c9","Type":"ContainerStarted","Data":"78382acb058788fee0da4822f24499301b3bc93f2b045b78498603b39d68465c"} Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.889009 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.889050 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:27 crc kubenswrapper[4702]: I1124 18:05:27.890592 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"3715a453d7d18601b5daa90e7ee42884998068cbc7b42c9284d2cb90bcbbc578"} Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.448044 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" podStartSLOduration=2.44802169 podStartE2EDuration="2.44802169s" podCreationTimestamp="2025-11-24 18:05:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:05:27.912132121 +0000 UTC m=+1017.152873285" watchObservedRunningTime="2025-11-24 18:05:28.44802169 +0000 UTC m=+1017.688762854" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.453268 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-v8j6c"] Nov 24 18:05:28 crc kubenswrapper[4702]: E1124 18:05:28.453651 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bd3660-2698-4a4f-9ddb-9bdfbf314101" containerName="mariadb-account-create-update" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.453675 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bd3660-2698-4a4f-9ddb-9bdfbf314101" 
containerName="mariadb-account-create-update" Nov 24 18:05:28 crc kubenswrapper[4702]: E1124 18:05:28.453691 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f209779-5517-4a4a-80d4-da7223983dde" containerName="mariadb-database-create" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.453704 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f209779-5517-4a4a-80d4-da7223983dde" containerName="mariadb-database-create" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.453901 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f209779-5517-4a4a-80d4-da7223983dde" containerName="mariadb-database-create" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.453924 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bd3660-2698-4a4f-9ddb-9bdfbf314101" containerName="mariadb-account-create-update" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.454515 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.456357 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-5kkrw" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.457059 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.475768 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-v8j6c"] Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.584408 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.584516 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bftw5\" (UniqueName: \"kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.584581 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.685665 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.685773 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " 
pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.686722 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bftw5\" (UniqueName: \"kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.692308 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.692414 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.704892 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bftw5\" (UniqueName: \"kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5\") pod \"glance-db-sync-v8j6c\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") " pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:28 crc kubenswrapper[4702]: I1124 18:05:28.804076 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.451948 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-v8j6c"] Nov 24 18:05:36 crc kubenswrapper[4702]: W1124 18:05:36.455617 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60a60b23_c17e_4447_b116_99442faaf1a7.slice/crio-4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10 WatchSource:0}: Error finding container 4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10: Status 404 returned error can't find the container with id 4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10 Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.565693 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.566342 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/swift-proxy-547856594f-rvdc7" Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.949978 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstackclient" event={"ID":"82389de7-41e2-4820-97e4-bccaf40d3fd6","Type":"ContainerStarted","Data":"7fd05053a5ca81ae08899e2b756bb4dc6031e836b7fe784088d713e40609e807"} Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.951044 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-v8j6c" event={"ID":"60a60b23-c17e-4447-b116-99442faaf1a7","Type":"ContainerStarted","Data":"4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10"} Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.953244 4702 
Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.953296 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"11916938d7a23b983ae1b128464fa1713e589a217b13696d150e56d80ad6a459"}
Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.953310 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"54c5e96904781f057c2c9e23bfed2438e5afb596ab37b567fc8492da7c55c919"}
Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.953322 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"b39714bd4cd2eef9369ff597c8406a5dbfd99e4501b14a01e0ed5bf64c6474e0"}
Nov 24 18:05:36 crc kubenswrapper[4702]: I1124 18:05:36.969356 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstackclient" podStartSLOduration=2.11262671 podStartE2EDuration="13.969333145s" podCreationTimestamp="2025-11-24 18:05:23 +0000 UTC" firstStartedPulling="2025-11-24 18:05:24.180917819 +0000 UTC m=+1013.421658983" lastFinishedPulling="2025-11-24 18:05:36.037624254 +0000 UTC m=+1025.278365418" observedRunningTime="2025-11-24 18:05:36.967313973 +0000 UTC m=+1026.208055167" watchObservedRunningTime="2025-11-24 18:05:36.969333145 +0000 UTC m=+1026.210074309"
Nov 24 18:05:37 crc kubenswrapper[4702]: I1124 18:05:37.963657 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"a1ca959d180bef15bda9617ca8e685eb74c1436fc121366ba76854379f460131"}
Nov 24 18:05:38 crc kubenswrapper[4702]: I1124 18:05:38.977331 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"df4c19d92f15986cd318c5572ae86987f036fffafb8c5d206e669f1e2e654eeb"}
Nov 24 18:05:38 crc kubenswrapper[4702]: I1124 18:05:38.977665 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"3de39080068eee3a973fa57d0e017653db8225a6a49e84bf8730473b4082da7e"}
Nov 24 18:05:38 crc kubenswrapper[4702]: I1124 18:05:38.977676 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"7277c4385e345819e92e039a4a36fc8cfa378541898dbce854bdc8426ffa5d9a"}
Nov 24 18:05:39 crc kubenswrapper[4702]: I1124 18:05:39.993730 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"08a9432c4721205c6061b34363d132f08351d0d425b4cd0434f8734f2cbfc5ed"}
Nov 24 18:05:39 crc kubenswrapper[4702]: I1124 18:05:39.994183 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"835ef9c38c36b5c0e7a4fc46cbcac900368bf4657e6cf3c8ec42475e5ca5a6d8"}
event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"835ef9c38c36b5c0e7a4fc46cbcac900368bf4657e6cf3c8ec42475e5ca5a6d8"} Nov 24 18:05:39 crc kubenswrapper[4702]: I1124 18:05:39.994194 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"e59bb822780cf68f544b7496fc228be70485a2e262b9055b7916b9df5850f8d5"} Nov 24 18:05:41 crc kubenswrapper[4702]: I1124 18:05:41.008213 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"065064a54cd66da29d14d4bbbfe04305291340ba25fac930243fc92998d29c89"} Nov 24 18:05:41 crc kubenswrapper[4702]: I1124 18:05:41.008275 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"8a684a3b6248f66d2a2aa897fe4ccf6be325140c178bd2a341378ec4cd4914a4"} Nov 24 18:05:41 crc kubenswrapper[4702]: I1124 18:05:41.008295 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"49bdb4e2c73bcf70a1e01de7ea514238fc8ab88680243d4ef17caee19788e15e"} Nov 24 18:05:41 crc kubenswrapper[4702]: I1124 18:05:41.008310 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb","Type":"ContainerStarted","Data":"8be97635648983ea2fd7dc6412b8225a29d05bf09d1eea818e8e3cfaba2847d4"} Nov 24 18:05:41 crc kubenswrapper[4702]: I1124 18:05:41.043222 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/swift-storage-0" podStartSLOduration=36.19450593 podStartE2EDuration="48.043201286s" podCreationTimestamp="2025-11-24 18:04:53 +0000 UTC" firstStartedPulling="2025-11-24 18:05:27.497848367 +0000 UTC m=+1016.738589531" lastFinishedPulling="2025-11-24 18:05:39.346543723 +0000 UTC m=+1028.587284887" observedRunningTime="2025-11-24 18:05:41.037784735 +0000 UTC m=+1030.278525919" watchObservedRunningTime="2025-11-24 18:05:41.043201286 +0000 UTC m=+1030.283942450" Nov 24 18:05:48 crc kubenswrapper[4702]: I1124 18:05:48.090965 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-v8j6c" event={"ID":"60a60b23-c17e-4447-b116-99442faaf1a7","Type":"ContainerStarted","Data":"97e3b250a2227d505a38be8797939315cd3f0cfcbb65b3ed471e0ff50dfa828f"} Nov 24 18:05:48 crc kubenswrapper[4702]: I1124 18:05:48.106070 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-v8j6c" podStartSLOduration=8.952886578 podStartE2EDuration="20.106052088s" podCreationTimestamp="2025-11-24 18:05:28 +0000 UTC" firstStartedPulling="2025-11-24 18:05:36.458672053 +0000 UTC m=+1025.699413217" lastFinishedPulling="2025-11-24 18:05:47.611837573 +0000 UTC m=+1036.852578727" observedRunningTime="2025-11-24 18:05:48.103335397 +0000 UTC m=+1037.344076581" watchObservedRunningTime="2025-11-24 18:05:48.106052088 +0000 UTC m=+1037.346793252" Nov 24 18:05:55 crc kubenswrapper[4702]: I1124 18:05:55.139756 4702 generic.go:334] "Generic (PLEG): container finished" podID="60a60b23-c17e-4447-b116-99442faaf1a7" containerID="97e3b250a2227d505a38be8797939315cd3f0cfcbb65b3ed471e0ff50dfa828f" exitCode=0 Nov 24 18:05:55 crc 
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.430950 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-v8j6c"
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.608863 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data\") pod \"60a60b23-c17e-4447-b116-99442faaf1a7\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") "
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.608943 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data\") pod \"60a60b23-c17e-4447-b116-99442faaf1a7\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") "
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.609112 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bftw5\" (UniqueName: \"kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5\") pod \"60a60b23-c17e-4447-b116-99442faaf1a7\" (UID: \"60a60b23-c17e-4447-b116-99442faaf1a7\") "
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.615917 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5" (OuterVolumeSpecName: "kube-api-access-bftw5") pod "60a60b23-c17e-4447-b116-99442faaf1a7" (UID: "60a60b23-c17e-4447-b116-99442faaf1a7"). InnerVolumeSpecName "kube-api-access-bftw5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.616164 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "60a60b23-c17e-4447-b116-99442faaf1a7" (UID: "60a60b23-c17e-4447-b116-99442faaf1a7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.646006 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data" (OuterVolumeSpecName: "config-data") pod "60a60b23-c17e-4447-b116-99442faaf1a7" (UID: "60a60b23-c17e-4447-b116-99442faaf1a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.711459 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bftw5\" (UniqueName: \"kubernetes.io/projected/60a60b23-c17e-4447-b116-99442faaf1a7-kube-api-access-bftw5\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.711501 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:56 crc kubenswrapper[4702]: I1124 18:05:56.711517 4702 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/60a60b23-c17e-4447-b116-99442faaf1a7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:05:57 crc kubenswrapper[4702]: I1124 18:05:57.154689 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-v8j6c" event={"ID":"60a60b23-c17e-4447-b116-99442faaf1a7","Type":"ContainerDied","Data":"4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10"} Nov 24 18:05:57 crc kubenswrapper[4702]: I1124 18:05:57.154992 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4883e84ee0863f65708c2da56820a5f1ed38876e88448647614a719b846f4f10" Nov 24 18:05:57 crc kubenswrapper[4702]: I1124 18:05:57.154758 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-v8j6c" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.388546 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Nov 24 18:05:58 crc kubenswrapper[4702]: E1124 18:05:58.388820 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60a60b23-c17e-4447-b116-99442faaf1a7" containerName="glance-db-sync" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.388832 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="60a60b23-c17e-4447-b116-99442faaf1a7" containerName="glance-db-sync" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.389005 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="60a60b23-c17e-4447-b116-99442faaf1a7" containerName="glance-db-sync" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.389710 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.392044 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.392482 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-single-config-data" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.392563 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-5kkrw" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.403115 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.523575 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.525038 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536733 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536772 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536793 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536845 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536869 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536948 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.536977 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537042 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-sys\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537069 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data\") pod \"glance-default-single-1\" (UID: 
\"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537122 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537185 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537211 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537265 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc5wm\" (UniqueName: \"kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.537353 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.541695 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638431 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638498 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638533 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638563 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638591 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638608 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638616 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638748 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638843 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638875 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638939 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638973 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639035 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run\") pod \"glance-default-single-0\" (UID: 
\"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639042 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639062 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639069 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639085 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.638991 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639077 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639115 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639172 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-sys\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639197 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639204 4702 operation_generator.go:637] "MountVolume.SetUp 
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639227 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639265 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639304 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639324 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639342 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639362 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639443 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639463 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639483 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0"
\"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639508 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm6wn\" (UniqueName: \"kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639540 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc5wm\" (UniqueName: \"kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639612 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639639 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639662 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") device mount path \"/mnt/openstack/pv06\"" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639698 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") device mount path \"/mnt/openstack/pv10\"" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.639932 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.648026 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.648498 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.661783 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.662085 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc5wm\" (UniqueName: \"kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.662214 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-single-1\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") " pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.706707 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741280 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741318 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741371 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741389 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741419 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741439 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741468 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741485 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741504 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm6wn\" (UniqueName: \"kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741532 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741548 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741594 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741620 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741691 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741787 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " 
pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.741842 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.742573 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.742586 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.742628 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.742717 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.743367 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") device mount path \"/mnt/openstack/pv05\"" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.744262 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.744309 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.744510 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.744545 4702 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.746499 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.748323 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.763123 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm6wn\" (UniqueName: \"kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.765008 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.768336 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:58 crc kubenswrapper[4702]: I1124 18:05:58.841092 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:05:59 crc kubenswrapper[4702]: I1124 18:05:59.040007 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:05:59 crc kubenswrapper[4702]: W1124 18:05:59.046433 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49ef5229_ab5d_4f9c_91c6_b6abb07e6270.slice/crio-81d599e7519c6c1565aeaf639a6d19adc4cba6ce8095d1055f507ef9295ec91e WatchSource:0}: Error finding container 81d599e7519c6c1565aeaf639a6d19adc4cba6ce8095d1055f507ef9295ec91e: Status 404 returned error can't find the container with id 81d599e7519c6c1565aeaf639a6d19adc4cba6ce8095d1055f507ef9295ec91e Nov 24 18:05:59 crc kubenswrapper[4702]: I1124 18:05:59.119369 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Nov 24 18:05:59 crc kubenswrapper[4702]: W1124 18:05:59.123839 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac0ccf36_1249_4a3f_b411_fd2df924a5ad.slice/crio-b9a8dda2eec94b9d2c52a034e025bc0f261c8ec33ea2f472e39c6fd3618d5528 WatchSource:0}: Error finding container b9a8dda2eec94b9d2c52a034e025bc0f261c8ec33ea2f472e39c6fd3618d5528: Status 404 returned error can't find the container with id b9a8dda2eec94b9d2c52a034e025bc0f261c8ec33ea2f472e39c6fd3618d5528 Nov 24 18:05:59 crc kubenswrapper[4702]: I1124 18:05:59.170814 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerStarted","Data":"b9a8dda2eec94b9d2c52a034e025bc0f261c8ec33ea2f472e39c6fd3618d5528"} Nov 24 18:05:59 crc kubenswrapper[4702]: I1124 18:05:59.172403 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerStarted","Data":"81d599e7519c6c1565aeaf639a6d19adc4cba6ce8095d1055f507ef9295ec91e"} Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.180497 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerStarted","Data":"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"} Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.181043 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerStarted","Data":"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"} Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.183505 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerStarted","Data":"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd"} Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.183540 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerStarted","Data":"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74"} Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.213311 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="glance-kuttl-tests/glance-default-single-1" podStartSLOduration=2.2132918950000002 podStartE2EDuration="2.213291895s" podCreationTimestamp="2025-11-24 18:05:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:00.207886974 +0000 UTC m=+1049.448628168" watchObservedRunningTime="2025-11-24 18:06:00.213291895 +0000 UTC m=+1049.454033059" Nov 24 18:06:00 crc kubenswrapper[4702]: I1124 18:06:00.235257 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=3.235227566 podStartE2EDuration="3.235227566s" podCreationTimestamp="2025-11-24 18:05:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:00.227635888 +0000 UTC m=+1049.468377072" watchObservedRunningTime="2025-11-24 18:06:00.235227566 +0000 UTC m=+1049.475968730" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.707452 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.707959 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.731163 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.743264 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.842407 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.842471 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.861751 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:08 crc kubenswrapper[4702]: I1124 18:06:08.875229 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:09 crc kubenswrapper[4702]: I1124 18:06:09.250615 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:09 crc kubenswrapper[4702]: I1124 18:06:09.250669 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:09 crc kubenswrapper[4702]: I1124 18:06:09.250683 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:09 crc kubenswrapper[4702]: I1124 18:06:09.250696 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.353730 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.354465 4702 prober_manager.go:312] "Failed to trigger a manual run" 
probe="Readiness" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.384662 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.384753 4702 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.396255 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.444070 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:11 crc kubenswrapper[4702]: I1124 18:06:11.477591 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:13 crc kubenswrapper[4702]: I1124 18:06:13.280686 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-log" containerID="cri-o://a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74" gracePeriod=30 Nov 24 18:06:13 crc kubenswrapper[4702]: I1124 18:06:13.280754 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-httpd" containerID="cri-o://ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd" gracePeriod=30 Nov 24 18:06:13 crc kubenswrapper[4702]: I1124 18:06:13.289488 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-0" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.0.102:9292/healthcheck\": EOF" Nov 24 18:06:13 crc kubenswrapper[4702]: I1124 18:06:13.289955 4702 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-0" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.102:9292/healthcheck\": EOF" Nov 24 18:06:14 crc kubenswrapper[4702]: I1124 18:06:14.291720 4702 generic.go:334] "Generic (PLEG): container finished" podID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerID="a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74" exitCode=143 Nov 24 18:06:14 crc kubenswrapper[4702]: I1124 18:06:14.291781 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerDied","Data":"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74"} Nov 24 18:06:16 crc kubenswrapper[4702]: I1124 18:06:16.861400 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002349 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002418 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002440 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002470 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002519 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002527 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys" (OuterVolumeSpecName: "sys") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002557 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002578 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002568 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002623 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002660 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002685 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002702 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002735 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002779 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zm6wn\" (UniqueName: \"kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.002847 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick\") pod \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\" (UID: \"49ef5229-ab5d-4f9c-91c6-b6abb07e6270\") " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003135 4702 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-sys\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003173 4702 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-lib-modules\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003226 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003266 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003305 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev" (OuterVolumeSpecName: "dev") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003318 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run" (OuterVolumeSpecName: "run") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003345 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003587 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.003741 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs" (OuterVolumeSpecName: "logs") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.011948 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts" (OuterVolumeSpecName: "scripts") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.011970 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.011974 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn" (OuterVolumeSpecName: "kube-api-access-zm6wn") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "kube-api-access-zm6wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.017212 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.056830 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data" (OuterVolumeSpecName: "config-data") pod "49ef5229-ab5d-4f9c-91c6-b6abb07e6270" (UID: "49ef5229-ab5d-4f9c-91c6-b6abb07e6270"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104489 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104538 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104551 4702 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104563 4702 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-iscsi\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104578 4702 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-logs\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104589 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104600 4702 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-dev\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104610 4702 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-etc-nvme\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104636 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume 
\"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104645 4702 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-run\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104655 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zm6wn\" (UniqueName: \"kubernetes.io/projected/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-kube-api-access-zm6wn\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.104665 4702 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49ef5229-ab5d-4f9c-91c6-b6abb07e6270-var-locks-brick\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.117125 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.117135 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.206089 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.206124 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.317238 4702 generic.go:334] "Generic (PLEG): container finished" podID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerID="ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd" exitCode=0 Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.317284 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerDied","Data":"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd"} Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.317308 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.317327 4702 scope.go:117] "RemoveContainer" containerID="ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.317315 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"49ef5229-ab5d-4f9c-91c6-b6abb07e6270","Type":"ContainerDied","Data":"81d599e7519c6c1565aeaf639a6d19adc4cba6ce8095d1055f507ef9295ec91e"} Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.341715 4702 scope.go:117] "RemoveContainer" containerID="a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.354228 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.361880 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.370852 4702 scope.go:117] "RemoveContainer" containerID="ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd" Nov 24 18:06:17 crc kubenswrapper[4702]: E1124 18:06:17.371230 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd\": container with ID starting with ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd not found: ID does not exist" containerID="ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.371343 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd"} err="failed to get container status \"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd\": rpc error: code = NotFound desc = could not find container \"ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd\": container with ID starting with ffd1358bcd04bd063f1382abade196f4fb9a551edb234871ed069e09e4209ffd not found: ID does not exist" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.371370 4702 scope.go:117] "RemoveContainer" containerID="a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74" Nov 24 18:06:17 crc kubenswrapper[4702]: E1124 18:06:17.371730 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74\": container with ID starting with a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74 not found: ID does not exist" containerID="a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.371816 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74"} err="failed to get container status \"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74\": rpc error: code = NotFound desc = could not find container \"a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74\": container with ID starting with a248c99d81de05c4c56e98dafdd65e0360b4f84cf15ae4f206229612b0133c74 not found: ID does not 
exist" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.380068 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:17 crc kubenswrapper[4702]: E1124 18:06:17.380390 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-log" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.380408 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-log" Nov 24 18:06:17 crc kubenswrapper[4702]: E1124 18:06:17.380424 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-httpd" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.380434 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-httpd" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.380597 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-httpd" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.380617 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" containerName="glance-log" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.381450 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.396962 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511060 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511195 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl7wh\" (UniqueName: \"kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511239 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511259 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511293 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: 
\"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511339 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511422 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511466 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511484 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511497 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511515 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511589 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511622 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.511681 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612729 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612849 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl7wh\" (UniqueName: \"kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612884 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612888 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612911 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612972 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.612986 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613021 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613031 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" 
Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613063 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613083 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613124 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613140 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613155 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613169 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613573 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613231 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613306 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") device mount path \"/mnt/openstack/pv05\"" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613640 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613263 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613708 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613731 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613771 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.613925 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.614207 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.626254 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.626938 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.638489 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl7wh\" (UniqueName: \"kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " 
pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.642417 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.642520 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") " pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.657400 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef5229-ab5d-4f9c-91c6-b6abb07e6270" path="/var/lib/kubelet/pods/49ef5229-ab5d-4f9c-91c6-b6abb07e6270/volumes" Nov 24 18:06:17 crc kubenswrapper[4702]: I1124 18:06:17.705240 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:18 crc kubenswrapper[4702]: I1124 18:06:18.162877 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:06:18 crc kubenswrapper[4702]: I1124 18:06:18.328257 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerStarted","Data":"d7832731f0f72c1458607ae591dfc6d6b8c28b5dd388ff4a98808d1a1fb2539e"} Nov 24 18:06:19 crc kubenswrapper[4702]: I1124 18:06:19.340935 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerStarted","Data":"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"} Nov 24 18:06:19 crc kubenswrapper[4702]: I1124 18:06:19.341511 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerStarted","Data":"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"} Nov 24 18:06:19 crc kubenswrapper[4702]: I1124 18:06:19.374386 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=2.374362427 podStartE2EDuration="2.374362427s" podCreationTimestamp="2025-11-24 18:06:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:19.36760862 +0000 UTC m=+1068.608349794" watchObservedRunningTime="2025-11-24 18:06:19.374362427 +0000 UTC m=+1068.615103611" Nov 24 18:06:27 crc kubenswrapper[4702]: I1124 18:06:27.705528 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:27 crc kubenswrapper[4702]: I1124 18:06:27.707340 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:27 crc kubenswrapper[4702]: I1124 18:06:27.729889 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:27 crc kubenswrapper[4702]: I1124 18:06:27.742017 4702 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:28 crc kubenswrapper[4702]: I1124 18:06:28.402336 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:28 crc kubenswrapper[4702]: I1124 18:06:28.402388 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:30 crc kubenswrapper[4702]: I1124 18:06:30.384928 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:30 crc kubenswrapper[4702]: I1124 18:06:30.388847 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:06:45 crc kubenswrapper[4702]: I1124 18:06:45.944993 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-v8j6c"] Nov 24 18:06:45 crc kubenswrapper[4702]: I1124 18:06:45.950777 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-v8j6c"] Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.053626 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-slzq6"] Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.054551 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.056565 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"combined-ca-bundle" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.056878 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.062339 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-slzq6"] Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.138729 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.139050 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.139170 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27bjz\" (UniqueName: \"kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.139351 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data\") pod \"glance-db-sync-slzq6\" (UID: 
\"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.240073 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.240148 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.240186 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.240231 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27bjz\" (UniqueName: \"kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.246069 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.246162 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.247396 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.259407 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27bjz\" (UniqueName: \"kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz\") pod \"glance-db-sync-slzq6\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.379258 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:46 crc kubenswrapper[4702]: I1124 18:06:46.788000 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-slzq6"] Nov 24 18:06:47 crc kubenswrapper[4702]: I1124 18:06:47.549474 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-slzq6" event={"ID":"73449a39-251c-4404-a535-6a91eca63a77","Type":"ContainerStarted","Data":"3bf99c29832af57966e8820bb2787f9a47c6a7b62dd131deb4605f7aaa3105a7"} Nov 24 18:06:47 crc kubenswrapper[4702]: I1124 18:06:47.550033 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-slzq6" event={"ID":"73449a39-251c-4404-a535-6a91eca63a77","Type":"ContainerStarted","Data":"152cad1eeeddd5bf5d6af7af8723d65e0535baccd671a3bf3cff1b096bc2b02f"} Nov 24 18:06:47 crc kubenswrapper[4702]: I1124 18:06:47.572399 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-slzq6" podStartSLOduration=1.572328854 podStartE2EDuration="1.572328854s" podCreationTimestamp="2025-11-24 18:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:47.568016643 +0000 UTC m=+1096.808757807" watchObservedRunningTime="2025-11-24 18:06:47.572328854 +0000 UTC m=+1096.813070018" Nov 24 18:06:47 crc kubenswrapper[4702]: I1124 18:06:47.662551 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60a60b23-c17e-4447-b116-99442faaf1a7" path="/var/lib/kubelet/pods/60a60b23-c17e-4447-b116-99442faaf1a7/volumes" Nov 24 18:06:50 crc kubenswrapper[4702]: I1124 18:06:50.574315 4702 generic.go:334] "Generic (PLEG): container finished" podID="73449a39-251c-4404-a535-6a91eca63a77" containerID="3bf99c29832af57966e8820bb2787f9a47c6a7b62dd131deb4605f7aaa3105a7" exitCode=0 Nov 24 18:06:50 crc kubenswrapper[4702]: I1124 18:06:50.574443 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-slzq6" event={"ID":"73449a39-251c-4404-a535-6a91eca63a77","Type":"ContainerDied","Data":"3bf99c29832af57966e8820bb2787f9a47c6a7b62dd131deb4605f7aaa3105a7"} Nov 24 18:06:51 crc kubenswrapper[4702]: I1124 18:06:51.856021 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-slzq6" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.034389 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data\") pod \"73449a39-251c-4404-a535-6a91eca63a77\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.034437 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data\") pod \"73449a39-251c-4404-a535-6a91eca63a77\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.034459 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle\") pod \"73449a39-251c-4404-a535-6a91eca63a77\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.034563 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27bjz\" (UniqueName: \"kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz\") pod \"73449a39-251c-4404-a535-6a91eca63a77\" (UID: \"73449a39-251c-4404-a535-6a91eca63a77\") " Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.039419 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz" (OuterVolumeSpecName: "kube-api-access-27bjz") pod "73449a39-251c-4404-a535-6a91eca63a77" (UID: "73449a39-251c-4404-a535-6a91eca63a77"). InnerVolumeSpecName "kube-api-access-27bjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.041380 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "73449a39-251c-4404-a535-6a91eca63a77" (UID: "73449a39-251c-4404-a535-6a91eca63a77"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.056501 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73449a39-251c-4404-a535-6a91eca63a77" (UID: "73449a39-251c-4404-a535-6a91eca63a77"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.077197 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data" (OuterVolumeSpecName: "config-data") pod "73449a39-251c-4404-a535-6a91eca63a77" (UID: "73449a39-251c-4404-a535-6a91eca63a77"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.136040 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27bjz\" (UniqueName: \"kubernetes.io/projected/73449a39-251c-4404-a535-6a91eca63a77-kube-api-access-27bjz\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.136084 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.136097 4702 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.136113 4702 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73449a39-251c-4404-a535-6a91eca63a77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.482730 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.483106 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.590313 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-slzq6" event={"ID":"73449a39-251c-4404-a535-6a91eca63a77","Type":"ContainerDied","Data":"152cad1eeeddd5bf5d6af7af8723d65e0535baccd671a3bf3cff1b096bc2b02f"} Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.590362 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="152cad1eeeddd5bf5d6af7af8723d65e0535baccd671a3bf3cff1b096bc2b02f" Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.590386 4702 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.765635 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"]
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.765980 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-log" containerID="cri-o://641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346" gracePeriod=30
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.766097 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-httpd" containerID="cri-o://c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7" gracePeriod=30
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.776646 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.776996 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-log" containerID="cri-o://4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4" gracePeriod=30
Nov 24 18:06:52 crc kubenswrapper[4702]: I1124 18:06:52.777182 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-httpd" containerID="cri-o://f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c" gracePeriod=30
Nov 24 18:06:53 crc kubenswrapper[4702]: I1124 18:06:53.599779 4702 generic.go:334] "Generic (PLEG): container finished" podID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerID="4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4" exitCode=143
Nov 24 18:06:53 crc kubenswrapper[4702]: I1124 18:06:53.599848 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerDied","Data":"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"}
Nov 24 18:06:53 crc kubenswrapper[4702]: I1124 18:06:53.601221 4702 generic.go:334] "Generic (PLEG): container finished" podID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerID="641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346" exitCode=143
Nov 24 18:06:53 crc kubenswrapper[4702]: I1124 18:06:53.601244 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerDied","Data":"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"}
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.294527 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.300629 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.397416 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.397875 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.397592 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev" (OuterVolumeSpecName: "dev") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.397908 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398014 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398057 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc5wm\" (UniqueName: \"kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398098 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398144 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-sys\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398186 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398232 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398230 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398270 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398292 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398327 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398367 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398392 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\" (UID: \"ac0ccf36-1249-4a3f-b411-fd2df924a5ad\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398485 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398570 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398595 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run" (OuterVolumeSpecName: "run") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398635 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398667 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398676 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs" (OuterVolumeSpecName: "logs") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.398742 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-sys" (OuterVolumeSpecName: "sys") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399198 4702 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-var-locks-brick\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399276 4702 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-dev\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399296 4702 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-logs\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399312 4702 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399326 4702 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-lib-modules\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399341 4702 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-sys\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399355 4702 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-nvme\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399368 4702 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-etc-iscsi\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.399383 4702 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-run\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.402842 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance-cache") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.403921 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts" (OuterVolumeSpecName: "scripts") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.403341 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm" (OuterVolumeSpecName: "kube-api-access-bc5wm") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "kube-api-access-bc5wm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.406652 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.432050 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data" (OuterVolumeSpecName: "config-data") pod "ac0ccf36-1249-4a3f-b411-fd2df924a5ad" (UID: "ac0ccf36-1249-4a3f-b411-fd2df924a5ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500140 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dl7wh\" (UniqueName: \"kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500198 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500277 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500300 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500319 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500344 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500376 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500395 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500411 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500431 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500460 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500494 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500517 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500564 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\" (UID: \"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef\") "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500398 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run" (OuterVolumeSpecName: "run") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500398 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys" (OuterVolumeSpecName: "sys") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500642 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500687 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500432 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500453 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev" (OuterVolumeSpecName: "dev") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500600 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500663 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.500899 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs" (OuterVolumeSpecName: "logs") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501229 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501256 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc5wm\" (UniqueName: \"kubernetes.io/projected/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-kube-api-access-bc5wm\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501271 4702 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-sys\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501282 4702 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-run\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501293 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac0ccf36-1249-4a3f-b411-fd2df924a5ad-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501303 4702 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-nvme\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501313 4702 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-logs\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501325 4702 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-dev\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501357 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501369 4702 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-etc-iscsi\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501385 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501396 4702 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-var-locks-brick\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501408 4702 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-lib-modules\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.501419 4702 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.502893 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.503652 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh" (OuterVolumeSpecName: "kube-api-access-dl7wh") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "kube-api-access-dl7wh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.503710 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts" (OuterVolumeSpecName: "scripts") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.504125 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.514932 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.517891 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.532280 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data" (OuterVolumeSpecName: "config-data") pod "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" (UID: "4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.602844 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.602882 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dl7wh\" (UniqueName: \"kubernetes.io/projected/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-kube-api-access-dl7wh\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.602892 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.602909 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.602921 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.603609 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.603647 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.614974 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.615651 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.622481 4702 generic.go:334] "Generic (PLEG): container finished" podID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerID="c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7" exitCode=0
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.622551 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.622548 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerDied","Data":"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"}
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.622603 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"ac0ccf36-1249-4a3f-b411-fd2df924a5ad","Type":"ContainerDied","Data":"b9a8dda2eec94b9d2c52a034e025bc0f261c8ec33ea2f472e39c6fd3618d5528"}
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.622621 4702 scope.go:117] "RemoveContainer" containerID="c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.624389 4702 generic.go:334] "Generic (PLEG): container finished" podID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerID="f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c" exitCode=0
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.624410 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerDied","Data":"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"}
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.624424 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef","Type":"ContainerDied","Data":"d7832731f0f72c1458607ae591dfc6d6b8c28b5dd388ff4a98808d1a1fb2539e"}
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.624476 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.650447 4702 scope.go:117] "RemoveContainer" containerID="641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.660212 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"]
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.665444 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"]
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.672282 4702 scope.go:117] "RemoveContainer" containerID="c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"
Nov 24 18:06:56 crc kubenswrapper[4702]: E1124 18:06:56.672793 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7\": container with ID starting with c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7 not found: ID does not exist" containerID="c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.672855 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7"} err="failed to get container status \"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7\": rpc error: code = NotFound desc = could not find container \"c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7\": container with ID starting with c2a295853dcc657bbc56dd12e349a7d3c9082c5a8957a27ed11c77b84692f7c7 not found: ID does not exist"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.672883 4702 scope.go:117] "RemoveContainer" containerID="641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"
Nov 24 18:06:56 crc kubenswrapper[4702]: E1124 18:06:56.673912 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346\": container with ID starting with 641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346 not found: ID does not exist" containerID="641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.673934 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346"} err="failed to get container status \"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346\": rpc error: code = NotFound desc = could not find container \"641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346\": container with ID starting with 641ccaee1a1f996959ca1f0b002f983af851108fca221bc936f35985d848f346 not found: ID does not exist"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.673947 4702 scope.go:117] "RemoveContainer" containerID="f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.674996 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.679692 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.690229 4702 scope.go:117] "RemoveContainer" containerID="4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.703840 4702 scope.go:117] "RemoveContainer" containerID="f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.704428 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.704447 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Nov 24 18:06:56 crc kubenswrapper[4702]: E1124 18:06:56.704493 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c\": container with ID starting with f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c not found: ID does not exist" containerID="f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.705365 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c"} err="failed to get container status \"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c\": rpc error: code = NotFound desc = could not find container \"f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c\": container with ID starting with f30d7c966210c0e296f85c2349c69ec237c73ee5f3e14c8ae46ee2a9a5ca316c not found: ID does not exist"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.705399 4702 scope.go:117] "RemoveContainer" containerID="4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"
Nov 24 18:06:56 crc kubenswrapper[4702]: E1124 18:06:56.705712 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4\": container with ID starting with 4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4 not found: ID does not exist" containerID="4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"
Nov 24 18:06:56 crc kubenswrapper[4702]: I1124 18:06:56.705749 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4"} err="failed to get container status \"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4\": rpc error: code = NotFound desc = could not find container \"4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4\": container with ID starting with 4bfdcbd1d6bf74974d8ffa6b68153f33b5ae1ae4e75e44ed14be667f53c9b5f4 not found: ID does not exist"
Nov 24 18:06:57 crc kubenswrapper[4702]: I1124 18:06:57.658501 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" path="/var/lib/kubelet/pods/4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef/volumes"
Nov 24 18:06:57 crc kubenswrapper[4702]: I1124 18:06:57.659340 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" path="/var/lib/kubelet/pods/ac0ccf36-1249-4a3f-b411-fd2df924a5ad/volumes"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109093 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:58 crc kubenswrapper[4702]: E1124 18:06:58.109399 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109418 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: E1124 18:06:58.109435 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109443 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: E1124 18:06:58.109467 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109477 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: E1124 18:06:58.109489 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73449a39-251c-4404-a535-6a91eca63a77" containerName="glance-db-sync"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109496 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="73449a39-251c-4404-a535-6a91eca63a77" containerName="glance-db-sync"
Nov 24 18:06:58 crc kubenswrapper[4702]: E1124 18:06:58.109510 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109517 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109669 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109682 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-log"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109702 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a3e1af5-12de-44a9-b6d9-0e3ca0eb96ef" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109724 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac0ccf36-1249-4a3f-b411-fd2df924a5ad" containerName="glance-httpd"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.109738 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="73449a39-251c-4404-a535-6a91eca63a77" containerName="glance-db-sync"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.110593 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.112847 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"cert-glance-default-internal-svc"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.112890 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"combined-ca-bundle"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.113634 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"cert-glance-default-public-svc"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.114056 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.114419 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-5kkrw"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.116264 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-single-config-data"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.133546 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226490 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctf5t\" (UniqueName: \"kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226627 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226650 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226683 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226719 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226733 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226767 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226787 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.226840 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328003 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328060 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328127 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328289 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") device mount path \"/mnt/openstack/pv05\"" pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328434 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328473 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328506 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328535 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.328657 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.329184 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.329349 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctf5t\" (UniqueName: \"kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.329493 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.335382 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.335546 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.335726 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.336217 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.337465 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.353221 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctf5t\" (UniqueName: \"kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.354665 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-single-0\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.436561 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
Nov 24 18:06:58 crc kubenswrapper[4702]: I1124 18:06:58.870300 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Nov 24 18:06:58 crc kubenswrapper[4702]: W1124 18:06:58.874269 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3fc4dad_cf30_49c7_bf43_34ee1c447e66.slice/crio-28d880a6ef144d2796b18bee2bdfe77af8a3b8f6e57c1465a7da3a3305dedb45 WatchSource:0}: Error finding container 28d880a6ef144d2796b18bee2bdfe77af8a3b8f6e57c1465a7da3a3305dedb45: Status 404 returned error can't find the container with id 28d880a6ef144d2796b18bee2bdfe77af8a3b8f6e57c1465a7da3a3305dedb45
Nov 24 18:06:59 crc kubenswrapper[4702]: I1124 18:06:59.662126 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerStarted","Data":"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e"}
Nov 24 18:06:59 crc kubenswrapper[4702]: I1124 18:06:59.662415 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerStarted","Data":"28d880a6ef144d2796b18bee2bdfe77af8a3b8f6e57c1465a7da3a3305dedb45"}
Nov 24 18:07:00 crc kubenswrapper[4702]: I1124 18:07:00.668710 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerStarted","Data":"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901"}
Nov 24 18:07:00 crc kubenswrapper[4702]: I1124 18:07:00.687298 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=2.68727669 podStartE2EDuration="2.68727669s" podCreationTimestamp="2025-11-24 18:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:00.685156833 +0000 UTC m=+1109.925898017" watchObservedRunningTime="2025-11-24 18:07:00.68727669 +0000 UTC m=+1109.928017854"
startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=2.68727669 podStartE2EDuration="2.68727669s" podCreationTimestamp="2025-11-24 18:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:00.685156833 +0000 UTC m=+1109.925898017" watchObservedRunningTime="2025-11-24 18:07:00.68727669 +0000 UTC m=+1109.928017854" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.437880 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.438449 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.471523 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.496034 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.725764 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:08 crc kubenswrapper[4702]: I1124 18:07:08.725845 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:10 crc kubenswrapper[4702]: I1124 18:07:10.631968 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:10 crc kubenswrapper[4702]: I1124 18:07:10.634865 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.359794 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-slzq6"] Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.364350 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-slzq6"] Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.394228 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance7533-account-delete-tschn"] Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.395375 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.406377 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance7533-account-delete-tschn"] Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.440588 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.520777 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rtld\" (UniqueName: \"kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.520997 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.622029 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.622125 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rtld\" (UniqueName: \"kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.622900 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.641364 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rtld\" (UniqueName: \"kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld\") pod \"glance7533-account-delete-tschn\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.675237 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73449a39-251c-4404-a535-6a91eca63a77" path="/var/lib/kubelet/pods/73449a39-251c-4404-a535-6a91eca63a77/volumes" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.712768 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.747117 4702 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="glance-kuttl-tests/glance-default-single-0" secret="" err="secret \"glance-glance-dockercfg-5kkrw\" not found" Nov 24 18:07:11 crc kubenswrapper[4702]: E1124 18:07:11.826936 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-scripts: secret "glance-scripts" not found Nov 24 18:07:11 crc kubenswrapper[4702]: E1124 18:07:11.827200 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:12.327185907 +0000 UTC m=+1121.567927061 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-scripts" not found Nov 24 18:07:11 crc kubenswrapper[4702]: E1124 18:07:11.827013 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-default-single-config-data: secret "glance-default-single-config-data" not found Nov 24 18:07:11 crc kubenswrapper[4702]: E1124 18:07:11.827484 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:12.327466885 +0000 UTC m=+1121.568208049 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-default-single-config-data" not found Nov 24 18:07:11 crc kubenswrapper[4702]: I1124 18:07:11.939423 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance7533-account-delete-tschn"] Nov 24 18:07:12 crc kubenswrapper[4702]: E1124 18:07:12.332979 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-scripts: secret "glance-scripts" not found Nov 24 18:07:12 crc kubenswrapper[4702]: E1124 18:07:12.333323 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:13.333307039 +0000 UTC m=+1122.574048203 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-scripts" not found Nov 24 18:07:12 crc kubenswrapper[4702]: E1124 18:07:12.333232 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-default-single-config-data: secret "glance-default-single-config-data" not found Nov 24 18:07:12 crc kubenswrapper[4702]: E1124 18:07:12.333654 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:13.333643858 +0000 UTC m=+1122.574385022 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-default-single-config-data" not found Nov 24 18:07:12 crc kubenswrapper[4702]: I1124 18:07:12.754358 4702 generic.go:334] "Generic (PLEG): container finished" podID="8a14960a-272d-48a7-be90-079886f98ad4" containerID="9c79b6ae1776f03c2e85977f2b22917ef244f7aadf9356d731bb355fbfb5c95c" exitCode=0 Nov 24 18:07:12 crc kubenswrapper[4702]: I1124 18:07:12.754455 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance7533-account-delete-tschn" event={"ID":"8a14960a-272d-48a7-be90-079886f98ad4","Type":"ContainerDied","Data":"9c79b6ae1776f03c2e85977f2b22917ef244f7aadf9356d731bb355fbfb5c95c"} Nov 24 18:07:12 crc kubenswrapper[4702]: I1124 18:07:12.754559 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance7533-account-delete-tschn" event={"ID":"8a14960a-272d-48a7-be90-079886f98ad4","Type":"ContainerStarted","Data":"bd141a3607b57b47eb0a70e29ccbea5b0540e66a74e9b9f0d72fefab12d6c04b"} Nov 24 18:07:12 crc kubenswrapper[4702]: I1124 18:07:12.754700 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-log" containerID="cri-o://4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e" gracePeriod=30 Nov 24 18:07:12 crc kubenswrapper[4702]: I1124 18:07:12.754823 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-httpd" containerID="cri-o://f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901" gracePeriod=30 Nov 24 18:07:13 crc kubenswrapper[4702]: E1124 18:07:13.346636 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-default-single-config-data: secret "glance-default-single-config-data" not found Nov 24 18:07:13 crc kubenswrapper[4702]: E1124 18:07:13.347071 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:15.347043834 +0000 UTC m=+1124.587785038 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-default-single-config-data" not found Nov 24 18:07:13 crc kubenswrapper[4702]: E1124 18:07:13.346729 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-scripts: secret "glance-scripts" not found Nov 24 18:07:13 crc kubenswrapper[4702]: E1124 18:07:13.347179 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:15.347156767 +0000 UTC m=+1124.587897931 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-scripts" not found Nov 24 18:07:13 crc kubenswrapper[4702]: I1124 18:07:13.765584 4702 generic.go:334] "Generic (PLEG): container finished" podID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerID="4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e" exitCode=143 Nov 24 18:07:13 crc kubenswrapper[4702]: I1124 18:07:13.765679 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerDied","Data":"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e"} Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.066773 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.156816 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rtld\" (UniqueName: \"kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld\") pod \"8a14960a-272d-48a7-be90-079886f98ad4\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.156965 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts\") pod \"8a14960a-272d-48a7-be90-079886f98ad4\" (UID: \"8a14960a-272d-48a7-be90-079886f98ad4\") " Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.157694 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a14960a-272d-48a7-be90-079886f98ad4" (UID: "8a14960a-272d-48a7-be90-079886f98ad4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.162600 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld" (OuterVolumeSpecName: "kube-api-access-2rtld") pod "8a14960a-272d-48a7-be90-079886f98ad4" (UID: "8a14960a-272d-48a7-be90-079886f98ad4"). InnerVolumeSpecName "kube-api-access-2rtld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.258963 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rtld\" (UniqueName: \"kubernetes.io/projected/8a14960a-272d-48a7-be90-079886f98ad4-kube-api-access-2rtld\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.258998 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a14960a-272d-48a7-be90-079886f98ad4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.774100 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance7533-account-delete-tschn" event={"ID":"8a14960a-272d-48a7-be90-079886f98ad4","Type":"ContainerDied","Data":"bd141a3607b57b47eb0a70e29ccbea5b0540e66a74e9b9f0d72fefab12d6c04b"} Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.774132 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd141a3607b57b47eb0a70e29ccbea5b0540e66a74e9b9f0d72fefab12d6c04b" Nov 24 18:07:14 crc kubenswrapper[4702]: I1124 18:07:14.774154 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance7533-account-delete-tschn" Nov 24 18:07:15 crc kubenswrapper[4702]: E1124 18:07:15.376806 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-default-single-config-data: secret "glance-default-single-config-data" not found Nov 24 18:07:15 crc kubenswrapper[4702]: E1124 18:07:15.377212 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:19.377192247 +0000 UTC m=+1128.617933421 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-default-single-config-data" not found Nov 24 18:07:15 crc kubenswrapper[4702]: E1124 18:07:15.376871 4702 secret.go:188] Couldn't get secret glance-kuttl-tests/glance-scripts: secret "glance-scripts" not found Nov 24 18:07:15 crc kubenswrapper[4702]: E1124 18:07:15.377415 4702 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts podName:b3fc4dad-cf30-49c7-bf43-34ee1c447e66 nodeName:}" failed. No retries permitted until 2025-11-24 18:07:19.377404132 +0000 UTC m=+1128.618145296 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts") pod "glance-default-single-0" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66") : secret "glance-scripts" not found Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.274332 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.393856 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctf5t\" (UniqueName: \"kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.393920 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.394037 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.394079 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.394109 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.394179 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.395183 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.395230 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.395282 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs\") pod \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\" (UID: \"b3fc4dad-cf30-49c7-bf43-34ee1c447e66\") " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.395992 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). 
InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.395741 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs" (OuterVolumeSpecName: "logs") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.396248 4702 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-logs\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.396272 4702 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.402311 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts" (OuterVolumeSpecName: "scripts") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.402485 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.402733 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t" (OuterVolumeSpecName: "kube-api-access-ctf5t") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "kube-api-access-ctf5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.420898 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-qjpg4"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.428907 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-qjpg4"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.434511 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-7533-account-create-update-ckvrg"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.439014 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance7533-account-delete-tschn"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.442252 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.444997 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-7533-account-create-update-ckvrg"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.446139 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.450199 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance7533-account-delete-tschn"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.450444 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.453845 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data" (OuterVolumeSpecName: "config-data") pod "b3fc4dad-cf30-49c7-bf43-34ee1c447e66" (UID: "b3fc4dad-cf30-49c7-bf43-34ee1c447e66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.497493 4702 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.497786 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.497905 4702 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.497998 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.498058 4702 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.498121 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctf5t\" (UniqueName: \"kubernetes.io/projected/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-kube-api-access-ctf5t\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.498189 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b3fc4dad-cf30-49c7-bf43-34ee1c447e66-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.511183 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.599515 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.792312 4702 generic.go:334] "Generic (PLEG): container finished" podID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerID="f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901" exitCode=0 Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.792381 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.792394 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerDied","Data":"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901"} Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.792487 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b3fc4dad-cf30-49c7-bf43-34ee1c447e66","Type":"ContainerDied","Data":"28d880a6ef144d2796b18bee2bdfe77af8a3b8f6e57c1465a7da3a3305dedb45"} Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.792521 4702 scope.go:117] "RemoveContainer" containerID="f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.823163 4702 scope.go:117] "RemoveContainer" containerID="4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.826900 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.832832 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.841291 4702 scope.go:117] "RemoveContainer" containerID="f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901" Nov 24 18:07:16 crc kubenswrapper[4702]: E1124 18:07:16.842428 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901\": container with ID starting with f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901 not found: ID does not exist" containerID="f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.842488 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901"} err="failed to get container status \"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901\": rpc error: code = NotFound desc = could not find container \"f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901\": container with ID starting with 
f77e1cbf4ad6c068cd73f725ffd329a0b879aa3bf42d51fe339a069d4a608901 not found: ID does not exist" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.842524 4702 scope.go:117] "RemoveContainer" containerID="4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e" Nov 24 18:07:16 crc kubenswrapper[4702]: E1124 18:07:16.843378 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e\": container with ID starting with 4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e not found: ID does not exist" containerID="4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e" Nov 24 18:07:16 crc kubenswrapper[4702]: I1124 18:07:16.843433 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e"} err="failed to get container status \"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e\": rpc error: code = NotFound desc = could not find container \"4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e\": container with ID starting with 4e241e661a955b4cb422641edf3e9083cb3c0f071b590e49fed925e55357206e not found: ID does not exist" Nov 24 18:07:17 crc kubenswrapper[4702]: I1124 18:07:17.656045 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f209779-5517-4a4a-80d4-da7223983dde" path="/var/lib/kubelet/pods/6f209779-5517-4a4a-80d4-da7223983dde/volumes" Nov 24 18:07:17 crc kubenswrapper[4702]: I1124 18:07:17.656926 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a14960a-272d-48a7-be90-079886f98ad4" path="/var/lib/kubelet/pods/8a14960a-272d-48a7-be90-079886f98ad4/volumes" Nov 24 18:07:17 crc kubenswrapper[4702]: I1124 18:07:17.657562 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" path="/var/lib/kubelet/pods/b3fc4dad-cf30-49c7-bf43-34ee1c447e66/volumes" Nov 24 18:07:17 crc kubenswrapper[4702]: I1124 18:07:17.658750 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6bd3660-2698-4a4f-9ddb-9bdfbf314101" path="/var/lib/kubelet/pods/b6bd3660-2698-4a4f-9ddb-9bdfbf314101/volumes" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.217907 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-x2lkz"] Nov 24 18:07:18 crc kubenswrapper[4702]: E1124 18:07:18.218231 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-log" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218247 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-log" Nov 24 18:07:18 crc kubenswrapper[4702]: E1124 18:07:18.218261 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-httpd" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218268 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-httpd" Nov 24 18:07:18 crc kubenswrapper[4702]: E1124 18:07:18.218292 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a14960a-272d-48a7-be90-079886f98ad4" containerName="mariadb-account-delete" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218299 4702 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8a14960a-272d-48a7-be90-079886f98ad4" containerName="mariadb-account-delete" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218423 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-httpd" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218435 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a14960a-272d-48a7-be90-079886f98ad4" containerName="mariadb-account-delete" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218450 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3fc4dad-cf30-49c7-bf43-34ee1c447e66" containerName="glance-log" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.218917 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.260639 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6"] Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.261667 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.263929 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.267638 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-x2lkz"] Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.272403 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6"] Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.324690 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.324741 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp6qc\" (UniqueName: \"kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.324763 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.325040 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p67vh\" (UniqueName: \"kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " 
pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.426333 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p67vh\" (UniqueName: \"kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.426414 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.426439 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp6qc\" (UniqueName: \"kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.426465 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.427310 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.427406 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.443673 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p67vh\" (UniqueName: \"kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh\") pod \"glance-1d6a-account-create-update-l4dz6\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.444820 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp6qc\" (UniqueName: \"kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc\") pod \"glance-db-create-x2lkz\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.555439 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.584056 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:18 crc kubenswrapper[4702]: I1124 18:07:18.952672 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-x2lkz"] Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.023977 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6"] Nov 24 18:07:19 crc kubenswrapper[4702]: W1124 18:07:19.029674 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod693f2168_99fc_4548_bf88_af6518389c3e.slice/crio-7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36 WatchSource:0}: Error finding container 7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36: Status 404 returned error can't find the container with id 7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36 Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.814531 4702 generic.go:334] "Generic (PLEG): container finished" podID="693f2168-99fc-4548-bf88-af6518389c3e" containerID="07001201be966a74f935baa50edbd9110b1f176513aecaeaddfc8f767bca56ce" exitCode=0 Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.814593 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" event={"ID":"693f2168-99fc-4548-bf88-af6518389c3e","Type":"ContainerDied","Data":"07001201be966a74f935baa50edbd9110b1f176513aecaeaddfc8f767bca56ce"} Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.814618 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" event={"ID":"693f2168-99fc-4548-bf88-af6518389c3e","Type":"ContainerStarted","Data":"7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36"} Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.816046 4702 generic.go:334] "Generic (PLEG): container finished" podID="4dce0c6e-8084-4b53-b7d9-3615741d0f11" containerID="fa698811d1744afb78f66fd76ced80f9ccad41a56d3dda4899a5d09c48aedb72" exitCode=0 Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.816079 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-x2lkz" event={"ID":"4dce0c6e-8084-4b53-b7d9-3615741d0f11","Type":"ContainerDied","Data":"fa698811d1744afb78f66fd76ced80f9ccad41a56d3dda4899a5d09c48aedb72"} Nov 24 18:07:19 crc kubenswrapper[4702]: I1124 18:07:19.816101 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-x2lkz" event={"ID":"4dce0c6e-8084-4b53-b7d9-3615741d0f11","Type":"ContainerStarted","Data":"66d22c98cf8496b038f9a11e19c49688c8e4bb88b14ab4aac0954848bf77007d"} Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.133280 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.140473 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.267438 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts\") pod \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.267547 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p67vh\" (UniqueName: \"kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh\") pod \"693f2168-99fc-4548-bf88-af6518389c3e\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.267579 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts\") pod \"693f2168-99fc-4548-bf88-af6518389c3e\" (UID: \"693f2168-99fc-4548-bf88-af6518389c3e\") " Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.267641 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hp6qc\" (UniqueName: \"kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc\") pod \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\" (UID: \"4dce0c6e-8084-4b53-b7d9-3615741d0f11\") " Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.268698 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "693f2168-99fc-4548-bf88-af6518389c3e" (UID: "693f2168-99fc-4548-bf88-af6518389c3e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.268859 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4dce0c6e-8084-4b53-b7d9-3615741d0f11" (UID: "4dce0c6e-8084-4b53-b7d9-3615741d0f11"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.273103 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc" (OuterVolumeSpecName: "kube-api-access-hp6qc") pod "4dce0c6e-8084-4b53-b7d9-3615741d0f11" (UID: "4dce0c6e-8084-4b53-b7d9-3615741d0f11"). InnerVolumeSpecName "kube-api-access-hp6qc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.273252 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh" (OuterVolumeSpecName: "kube-api-access-p67vh") pod "693f2168-99fc-4548-bf88-af6518389c3e" (UID: "693f2168-99fc-4548-bf88-af6518389c3e"). InnerVolumeSpecName "kube-api-access-p67vh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.369574 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dce0c6e-8084-4b53-b7d9-3615741d0f11-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.369610 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p67vh\" (UniqueName: \"kubernetes.io/projected/693f2168-99fc-4548-bf88-af6518389c3e-kube-api-access-p67vh\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.369624 4702 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693f2168-99fc-4548-bf88-af6518389c3e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.369632 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hp6qc\" (UniqueName: \"kubernetes.io/projected/4dce0c6e-8084-4b53-b7d9-3615741d0f11-kube-api-access-hp6qc\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.836561 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.836840 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6" event={"ID":"693f2168-99fc-4548-bf88-af6518389c3e","Type":"ContainerDied","Data":"7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36"} Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.836897 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b76f03b3138706b2a96a9cd360da5b37c97deadc04e62b4d179796010ce8c36" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.839044 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-x2lkz" event={"ID":"4dce0c6e-8084-4b53-b7d9-3615741d0f11","Type":"ContainerDied","Data":"66d22c98cf8496b038f9a11e19c49688c8e4bb88b14ab4aac0954848bf77007d"} Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.839086 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66d22c98cf8496b038f9a11e19c49688c8e4bb88b14ab4aac0954848bf77007d" Nov 24 18:07:21 crc kubenswrapper[4702]: I1124 18:07:21.839116 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-create-x2lkz" Nov 24 18:07:22 crc kubenswrapper[4702]: I1124 18:07:22.483065 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:07:22 crc kubenswrapper[4702]: I1124 18:07:22.483128 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.425938 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-bglfv"] Nov 24 18:07:23 crc kubenswrapper[4702]: E1124 18:07:23.426278 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="693f2168-99fc-4548-bf88-af6518389c3e" containerName="mariadb-account-create-update" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.426292 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="693f2168-99fc-4548-bf88-af6518389c3e" containerName="mariadb-account-create-update" Nov 24 18:07:23 crc kubenswrapper[4702]: E1124 18:07:23.426327 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dce0c6e-8084-4b53-b7d9-3615741d0f11" containerName="mariadb-database-create" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.426339 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dce0c6e-8084-4b53-b7d9-3615741d0f11" containerName="mariadb-database-create" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.426490 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dce0c6e-8084-4b53-b7d9-3615741d0f11" containerName="mariadb-database-create" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.426507 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="693f2168-99fc-4548-bf88-af6518389c3e" containerName="mariadb-account-create-update" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.427161 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.429777 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.429907 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-6r5ks" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.433729 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bglfv"] Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.505238 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.505363 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2c6t\" (UniqueName: \"kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.505477 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.606308 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.606387 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.606433 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2c6t\" (UniqueName: \"kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.612280 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.615751 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.622523 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2c6t\" (UniqueName: \"kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t\") pod \"glance-db-sync-bglfv\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:23 crc kubenswrapper[4702]: I1124 18:07:23.754060 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:24 crc kubenswrapper[4702]: I1124 18:07:24.159332 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bglfv"] Nov 24 18:07:24 crc kubenswrapper[4702]: I1124 18:07:24.860684 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bglfv" event={"ID":"3488ae92-0040-4207-b23e-5c38723cebb3","Type":"ContainerStarted","Data":"b55126b97c32842db46158d3483b300ad8f569cae749b93188625ce6c95485b6"} Nov 24 18:07:24 crc kubenswrapper[4702]: I1124 18:07:24.861057 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bglfv" event={"ID":"3488ae92-0040-4207-b23e-5c38723cebb3","Type":"ContainerStarted","Data":"b0660d7eae544cf56a3a9ae059327899e3d46aab2c93c2db5e52f72f8cdb6bc4"} Nov 24 18:07:24 crc kubenswrapper[4702]: I1124 18:07:24.882564 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-bglfv" podStartSLOduration=1.88254677 podStartE2EDuration="1.88254677s" podCreationTimestamp="2025-11-24 18:07:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:24.877018302 +0000 UTC m=+1134.117759476" watchObservedRunningTime="2025-11-24 18:07:24.88254677 +0000 UTC m=+1134.123287934" Nov 24 18:07:27 crc kubenswrapper[4702]: I1124 18:07:27.881185 4702 generic.go:334] "Generic (PLEG): container finished" podID="3488ae92-0040-4207-b23e-5c38723cebb3" containerID="b55126b97c32842db46158d3483b300ad8f569cae749b93188625ce6c95485b6" exitCode=0 Nov 24 18:07:27 crc kubenswrapper[4702]: I1124 18:07:27.881241 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bglfv" event={"ID":"3488ae92-0040-4207-b23e-5c38723cebb3","Type":"ContainerDied","Data":"b55126b97c32842db46158d3483b300ad8f569cae749b93188625ce6c95485b6"} Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.147500 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.188923 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data\") pod \"3488ae92-0040-4207-b23e-5c38723cebb3\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.189006 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2c6t\" (UniqueName: \"kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t\") pod \"3488ae92-0040-4207-b23e-5c38723cebb3\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.189072 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data\") pod \"3488ae92-0040-4207-b23e-5c38723cebb3\" (UID: \"3488ae92-0040-4207-b23e-5c38723cebb3\") " Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.202032 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t" (OuterVolumeSpecName: "kube-api-access-h2c6t") pod "3488ae92-0040-4207-b23e-5c38723cebb3" (UID: "3488ae92-0040-4207-b23e-5c38723cebb3"). InnerVolumeSpecName "kube-api-access-h2c6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.202014 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3488ae92-0040-4207-b23e-5c38723cebb3" (UID: "3488ae92-0040-4207-b23e-5c38723cebb3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.232412 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data" (OuterVolumeSpecName: "config-data") pod "3488ae92-0040-4207-b23e-5c38723cebb3" (UID: "3488ae92-0040-4207-b23e-5c38723cebb3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.290654 4702 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.290697 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2c6t\" (UniqueName: \"kubernetes.io/projected/3488ae92-0040-4207-b23e-5c38723cebb3-kube-api-access-h2c6t\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.290709 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3488ae92-0040-4207-b23e-5c38723cebb3-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.897215 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bglfv" event={"ID":"3488ae92-0040-4207-b23e-5c38723cebb3","Type":"ContainerDied","Data":"b0660d7eae544cf56a3a9ae059327899e3d46aab2c93c2db5e52f72f8cdb6bc4"} Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.897303 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0660d7eae544cf56a3a9ae059327899e3d46aab2c93c2db5e52f72f8cdb6bc4" Nov 24 18:07:29 crc kubenswrapper[4702]: I1124 18:07:29.897321 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bglfv" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.898939 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Nov 24 18:07:30 crc kubenswrapper[4702]: E1124 18:07:30.900371 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3488ae92-0040-4207-b23e-5c38723cebb3" containerName="glance-db-sync" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.900454 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="3488ae92-0040-4207-b23e-5c38723cebb3" containerName="glance-db-sync" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.900690 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="3488ae92-0040-4207-b23e-5c38723cebb3" containerName="glance-db-sync" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.902164 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.905445 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.905465 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-6r5ks" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.909915 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-external-config-data" Nov 24 18:07:30 crc kubenswrapper[4702]: I1124 18:07:30.917384 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015738 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-logs\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015791 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-dev\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015828 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-sys\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015848 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015933 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.015982 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-scripts\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016082 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: 
\"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016126 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016156 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-run\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016200 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-config-data\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016224 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016252 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p2jm\" (UniqueName: \"kubernetes.io/projected/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-kube-api-access-4p2jm\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016286 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.016308 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117743 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117815 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: 
\"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117839 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-run\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117876 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-config-data\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117893 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117912 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p2jm\" (UniqueName: \"kubernetes.io/projected/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-kube-api-access-4p2jm\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117942 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117962 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117957 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117995 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-logs\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118055 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-run\") pod 
\"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.117957 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118172 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-dev\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118198 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-sys\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118217 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118236 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118252 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-scripts\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118266 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-dev\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118273 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-sys\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118317 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") device mount path \"/mnt/openstack/pv07\"" 
pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118331 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118326 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118317 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") device mount path \"/mnt/openstack/pv09\"" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.118512 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.119173 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-logs\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.126200 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-config-data\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.126738 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-scripts\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.143742 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p2jm\" (UniqueName: \"kubernetes.io/projected/31cf16bb-c1c3-4dbb-9a07-c6317c2e070a-kube-api-access-4p2jm\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.144851 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc 
kubenswrapper[4702]: I1124 18:07:31.146051 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a\") " pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.225209 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.321519 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.323141 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.330251 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.351313 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.421913 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422222 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422253 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422282 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422327 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422343 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422357 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422378 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422394 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbs8n\" (UniqueName: \"kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422417 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422436 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422472 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422492 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.422512 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.523698 4702 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.523745 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.523776 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.523936 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.524021 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.524149 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.526653 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.526833 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.526860 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.526923 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.526978 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527045 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527043 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527339 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527600 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527664 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbs8n\" (UniqueName: \"kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527705 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527761 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527855 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: 
\"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527902 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527937 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.527978 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.528025 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.528025 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.528195 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.534628 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.535053 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.543866 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbs8n\" (UniqueName: \"kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n\") pod 
\"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.545476 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.557468 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.646302 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.733496 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.846774 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.912046 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a","Type":"ContainerStarted","Data":"e9793a8e5574afbfba3f1834826040f856cb098c634a16d6798538606524d02f"} Nov 24 18:07:31 crc kubenswrapper[4702]: I1124 18:07:31.912083 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a","Type":"ContainerStarted","Data":"e477079575293ed6fd51228faca8943f649e886bd80f17b84eab6182f366c6f4"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.106247 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:32 crc kubenswrapper[4702]: W1124 18:07:32.124427 4702 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9abfc19_b09d_4c84_bb5b_ae4dc83dae37.slice/crio-06ac71bbfc6595c3140d2f79bc1e291adb18bf72d231903a2c9a65e60612a6aa WatchSource:0}: Error finding container 06ac71bbfc6595c3140d2f79bc1e291adb18bf72d231903a2c9a65e60612a6aa: Status 404 returned error can't find the container with id 06ac71bbfc6595c3140d2f79bc1e291adb18bf72d231903a2c9a65e60612a6aa Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.920169 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a","Type":"ContainerStarted","Data":"791358aaca839a2da7ec188e6776a9cf3028d437b5ebae76d48d1e523b40cae2"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.920724 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"31cf16bb-c1c3-4dbb-9a07-c6317c2e070a","Type":"ContainerStarted","Data":"b55bb08bf2f12633832b19721e18a3874435a66cbd78b74f2a9611182542b6f9"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923004 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerStarted","Data":"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923056 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerStarted","Data":"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923068 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerStarted","Data":"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923078 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerStarted","Data":"06ac71bbfc6595c3140d2f79bc1e291adb18bf72d231903a2c9a65e60612a6aa"} Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923183 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-log" containerID="cri-o://7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" gracePeriod=30 Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923219 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-httpd" containerID="cri-o://286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" gracePeriod=30 Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.923228 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-api" containerID="cri-o://b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" gracePeriod=30 Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.949090 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-0" podStartSLOduration=2.949074718 podStartE2EDuration="2.949074718s" podCreationTimestamp="2025-11-24 18:07:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:32.94687563 +0000 UTC m=+1142.187616804" watchObservedRunningTime="2025-11-24 18:07:32.949074718 +0000 UTC m=+1142.189815872" Nov 24 18:07:32 crc kubenswrapper[4702]: I1124 18:07:32.977593 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=2.977573314 podStartE2EDuration="2.977573314s" podCreationTimestamp="2025-11-24 18:07:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:32.97373352 +0000 UTC m=+1142.214474714" watchObservedRunningTime="2025-11-24 18:07:32.977573314 +0000 UTC m=+1142.218314488" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.332644 4702 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458495 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458530 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458585 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458614 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458628 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458670 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458688 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458705 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458731 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458732 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run" (OuterVolumeSpecName: "run") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458756 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458786 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbs8n\" (UniqueName: \"kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458842 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458875 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458875 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458900 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi\") pod \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\" (UID: \"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37\") " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458946 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458979 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.459065 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.459094 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs" (OuterVolumeSpecName: "logs") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.459099 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys" (OuterVolumeSpecName: "sys") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.458759 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev" (OuterVolumeSpecName: "dev") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.459415 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.459986 4702 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-iscsi\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460049 4702 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-run\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460090 4702 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-etc-nvme\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460105 4702 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-dev\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460117 4702 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-logs\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460131 4702 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460164 4702 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-lib-modules\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460179 4702 
reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-sys\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.460191 4702 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-var-locks-brick\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.463872 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.465958 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts" (OuterVolumeSpecName: "scripts") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.466476 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.476493 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n" (OuterVolumeSpecName: "kube-api-access-tbs8n") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "kube-api-access-tbs8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.535199 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data" (OuterVolumeSpecName: "config-data") pod "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" (UID: "d9abfc19-b09d-4c84-bb5b-ae4dc83dae37"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.561613 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.561652 4702 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.561664 4702 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.561674 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbs8n\" (UniqueName: \"kubernetes.io/projected/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-kube-api-access-tbs8n\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.561685 4702 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.575580 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.578812 4702 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.662787 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.662838 4702 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932413 4702 generic.go:334] "Generic (PLEG): container finished" podID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" exitCode=143 Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932439 4702 generic.go:334] "Generic (PLEG): container finished" podID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" exitCode=143 Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932452 4702 generic.go:334] "Generic (PLEG): container finished" podID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" exitCode=143 Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932490 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932527 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerDied","Data":"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf"} Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932581 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerDied","Data":"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40"} Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932593 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerDied","Data":"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad"} Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932606 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"d9abfc19-b09d-4c84-bb5b-ae4dc83dae37","Type":"ContainerDied","Data":"06ac71bbfc6595c3140d2f79bc1e291adb18bf72d231903a2c9a65e60612a6aa"} Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.932623 4702 scope.go:117] "RemoveContainer" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.955217 4702 scope.go:117] "RemoveContainer" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.958332 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.964893 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.980884 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:33 crc kubenswrapper[4702]: E1124 18:07:33.981167 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-httpd" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981185 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-httpd" Nov 24 18:07:33 crc kubenswrapper[4702]: E1124 18:07:33.981200 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-log" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981206 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-log" Nov 24 18:07:33 crc kubenswrapper[4702]: E1124 18:07:33.981215 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-api" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981220 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-api" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981345 4702 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-api" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981358 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-log" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.981366 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" containerName="glance-httpd" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.982314 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.983647 4702 scope.go:117] "RemoveContainer" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" Nov 24 18:07:33 crc kubenswrapper[4702]: I1124 18:07:33.984196 4702 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.009991 4702 scope.go:117] "RemoveContainer" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" Nov 24 18:07:34 crc kubenswrapper[4702]: E1124 18:07:34.013976 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": container with ID starting with b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf not found: ID does not exist" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.014085 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf"} err="failed to get container status \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": rpc error: code = NotFound desc = could not find container \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": container with ID starting with b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.014119 4702 scope.go:117] "RemoveContainer" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" Nov 24 18:07:34 crc kubenswrapper[4702]: E1124 18:07:34.014888 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": container with ID starting with 286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40 not found: ID does not exist" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.015002 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40"} err="failed to get container status \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": rpc error: code = NotFound desc = could not find container \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": container with ID starting with 286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40 not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 
18:07:34.015095 4702 scope.go:117] "RemoveContainer" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" Nov 24 18:07:34 crc kubenswrapper[4702]: E1124 18:07:34.015539 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": container with ID starting with 7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad not found: ID does not exist" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.015667 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad"} err="failed to get container status \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": rpc error: code = NotFound desc = could not find container \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": container with ID starting with 7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.015744 4702 scope.go:117] "RemoveContainer" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.016108 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf"} err="failed to get container status \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": rpc error: code = NotFound desc = could not find container \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": container with ID starting with b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.016192 4702 scope.go:117] "RemoveContainer" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.021550 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40"} err="failed to get container status \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": rpc error: code = NotFound desc = could not find container \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": container with ID starting with 286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40 not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.021636 4702 scope.go:117] "RemoveContainer" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.022927 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad"} err="failed to get container status \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": rpc error: code = NotFound desc = could not find container \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": container with ID starting with 7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 
18:07:34.022970 4702 scope.go:117] "RemoveContainer" containerID="b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.024544 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf"} err="failed to get container status \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": rpc error: code = NotFound desc = could not find container \"b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf\": container with ID starting with b474deff17f0a76525bb1b5be6f309d7332646d5f103a86e079c846fac183caf not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.024591 4702 scope.go:117] "RemoveContainer" containerID="286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.025211 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40"} err="failed to get container status \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": rpc error: code = NotFound desc = could not find container \"286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40\": container with ID starting with 286f99e0ba5fdb08d9495dceb11d38e9f5578311aa45b4696265269dc9d74b40 not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.025252 4702 scope.go:117] "RemoveContainer" containerID="7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.026468 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad"} err="failed to get container status \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": rpc error: code = NotFound desc = could not find container \"7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad\": container with ID starting with 7b89fc37b50c46b6e198e3ba7dd4d5a3fb6dbe1f38400f1b0759ba0706f9afad not found: ID does not exist" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.052134 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170555 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170648 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170678 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-iscsi\") pod 
\"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170708 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170744 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-logs\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170780 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-dev\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.170862 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-sys\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.171816 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.171892 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.171965 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.172042 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-run\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.172067 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2x8g\" 
(UniqueName: \"kubernetes.io/projected/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-kube-api-access-s2x8g\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.172107 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.172130 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273256 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-logs\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273308 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-sys\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273325 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-dev\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273341 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273369 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273400 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273402 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: 
\"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-sys\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273428 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-run\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273454 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-run\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273480 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-dev\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273506 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273574 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2x8g\" (UniqueName: \"kubernetes.io/projected/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-kube-api-access-s2x8g\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273594 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273605 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273677 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273725 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-httpd-run\") pod 
\"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273766 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-logs\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273854 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273885 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.273910 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.274142 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.274160 4702 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.274180 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.274201 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.274370 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.278175 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.279290 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.296818 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.301201 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2x8g\" (UniqueName: \"kubernetes.io/projected/7ce6c7a2-7aeb-4ad5-9339-08635b05cefe-kube-api-access-s2x8g\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.314045 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.606755 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.917358 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Nov 24 18:07:34 crc kubenswrapper[4702]: I1124 18:07:34.939775 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe","Type":"ContainerStarted","Data":"85bedb32ff537c568a84aff55c07f4fe037efc5ed4d5ac3f92071a2a606fe643"} Nov 24 18:07:35 crc kubenswrapper[4702]: I1124 18:07:35.657948 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9abfc19-b09d-4c84-bb5b-ae4dc83dae37" path="/var/lib/kubelet/pods/d9abfc19-b09d-4c84-bb5b-ae4dc83dae37/volumes" Nov 24 18:07:35 crc kubenswrapper[4702]: I1124 18:07:35.948200 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe","Type":"ContainerStarted","Data":"f4c7d1e0bc6f89c667eb2abb81b835e2ddb7fdb02feb9da33f586d66cb75145f"} Nov 24 18:07:35 crc kubenswrapper[4702]: I1124 18:07:35.949136 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe","Type":"ContainerStarted","Data":"f388af45c205dd2a6cdd9312b20c3307c3ed81bb095add023d7cf0bcb9d57cdc"} Nov 24 18:07:35 crc kubenswrapper[4702]: I1124 18:07:35.949200 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"7ce6c7a2-7aeb-4ad5-9339-08635b05cefe","Type":"ContainerStarted","Data":"7d76fb9936e10e8d4a739d74e03bec3acedaf56188825917b9c87258e038d2be"} Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.226071 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.226736 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.226766 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.255331 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.258638 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.270859 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:41 crc kubenswrapper[4702]: I1124 18:07:41.281545 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=8.281525709 podStartE2EDuration="8.281525709s" podCreationTimestamp="2025-11-24 18:07:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:35.979502065 +0000 UTC m=+1145.220243229" watchObservedRunningTime="2025-11-24 18:07:41.281525709 +0000 UTC m=+1150.522266873" Nov 24 
18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.006168 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.006260 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.006287 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.018585 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.020421 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:42 crc kubenswrapper[4702]: I1124 18:07:42.022597 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.607452 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.607733 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.608598 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.636454 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.639938 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:44 crc kubenswrapper[4702]: I1124 18:07:44.644307 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.017063 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.017118 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.017135 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.028192 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.028302 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:45 crc kubenswrapper[4702]: I1124 18:07:45.028817 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Nov 24 18:07:52 crc kubenswrapper[4702]: I1124 18:07:52.482753 4702 patch_prober.go:28] interesting 
pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:07:52 crc kubenswrapper[4702]: I1124 18:07:52.483236 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:07:52 crc kubenswrapper[4702]: I1124 18:07:52.483294 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 18:07:52 crc kubenswrapper[4702]: I1124 18:07:52.484196 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:07:52 crc kubenswrapper[4702]: I1124 18:07:52.484250 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29" gracePeriod=600 Nov 24 18:07:53 crc kubenswrapper[4702]: I1124 18:07:53.076646 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29" exitCode=0 Nov 24 18:07:53 crc kubenswrapper[4702]: I1124 18:07:53.076695 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29"} Nov 24 18:07:53 crc kubenswrapper[4702]: I1124 18:07:53.076737 4702 scope.go:117] "RemoveContainer" containerID="4011a188ddc8db486f09768596b60bde8bdda093d9026f101b6e07b95e6c1b9c" Nov 24 18:07:54 crc kubenswrapper[4702]: I1124 18:07:54.088007 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89"} Nov 24 18:10:22 crc kubenswrapper[4702]: I1124 18:10:22.482773 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:10:22 crc kubenswrapper[4702]: I1124 18:10:22.483439 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:10:52 
crc kubenswrapper[4702]: I1124 18:10:52.482759 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:10:52 crc kubenswrapper[4702]: I1124 18:10:52.483381 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:11:22 crc kubenswrapper[4702]: I1124 18:11:22.483164 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:11:22 crc kubenswrapper[4702]: I1124 18:11:22.483653 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:11:22 crc kubenswrapper[4702]: I1124 18:11:22.483698 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 18:11:22 crc kubenswrapper[4702]: I1124 18:11:22.484350 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:11:22 crc kubenswrapper[4702]: I1124 18:11:22.484415 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89" gracePeriod=600 Nov 24 18:11:23 crc kubenswrapper[4702]: I1124 18:11:23.643908 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89" exitCode=0 Nov 24 18:11:23 crc kubenswrapper[4702]: I1124 18:11:23.643998 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89"} Nov 24 18:11:23 crc kubenswrapper[4702]: I1124 18:11:23.644295 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"} Nov 24 18:11:23 crc kubenswrapper[4702]: I1124 18:11:23.644317 4702 scope.go:117] "RemoveContainer" 
containerID="ac12e6b477ebde666e0caac9bb9b30205473c61443a295d042de29a7b9e33c29" Nov 24 18:11:37 crc kubenswrapper[4702]: I1124 18:11:37.474219 4702 scope.go:117] "RemoveContainer" containerID="026e5276b3070dfe579cce4db53ef0fe5834c55aae623db888637cc4bcfd772d" Nov 24 18:11:37 crc kubenswrapper[4702]: I1124 18:11:37.495903 4702 scope.go:117] "RemoveContainer" containerID="3a4316293c2d9a765657478c2906e5a8682ce745225654810e6c8749b5eb4bcb" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.175749 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.177635 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.236369 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.244174 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.244267 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.244325 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpwqg\" (UniqueName: \"kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.345612 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.345689 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.345735 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpwqg\" (UniqueName: \"kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.346120 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.346261 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.365136 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpwqg\" (UniqueName: \"kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg\") pod \"community-operators-wk2rt\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.495011 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:13 crc kubenswrapper[4702]: I1124 18:12:13.903405 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:14 crc kubenswrapper[4702]: I1124 18:12:14.015973 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerStarted","Data":"5a0b8ad268cd847a48af3d7c3f08c13f93799eee23c14b7e6a1dfe49784b0c70"} Nov 24 18:12:15 crc kubenswrapper[4702]: I1124 18:12:15.023256 4702 generic.go:334] "Generic (PLEG): container finished" podID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerID="c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b" exitCode=0 Nov 24 18:12:15 crc kubenswrapper[4702]: I1124 18:12:15.023305 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerDied","Data":"c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b"} Nov 24 18:12:15 crc kubenswrapper[4702]: I1124 18:12:15.025070 4702 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 18:12:16 crc kubenswrapper[4702]: I1124 18:12:16.043609 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerStarted","Data":"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741"} Nov 24 18:12:17 crc kubenswrapper[4702]: I1124 18:12:17.051540 4702 generic.go:334] "Generic (PLEG): container finished" podID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerID="2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741" exitCode=0 Nov 24 18:12:17 crc kubenswrapper[4702]: I1124 18:12:17.051580 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerDied","Data":"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741"} Nov 24 18:12:19 crc kubenswrapper[4702]: I1124 18:12:19.069567 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" 
event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerStarted","Data":"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a"} Nov 24 18:12:19 crc kubenswrapper[4702]: I1124 18:12:19.096729 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wk2rt" podStartSLOduration=3.172873408 podStartE2EDuration="6.096706748s" podCreationTimestamp="2025-11-24 18:12:13 +0000 UTC" firstStartedPulling="2025-11-24 18:12:15.02484304 +0000 UTC m=+1424.265584204" lastFinishedPulling="2025-11-24 18:12:17.94867638 +0000 UTC m=+1427.189417544" observedRunningTime="2025-11-24 18:12:19.090466501 +0000 UTC m=+1428.331207675" watchObservedRunningTime="2025-11-24 18:12:19.096706748 +0000 UTC m=+1428.337447912" Nov 24 18:12:23 crc kubenswrapper[4702]: I1124 18:12:23.495641 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:23 crc kubenswrapper[4702]: I1124 18:12:23.496045 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:23 crc kubenswrapper[4702]: I1124 18:12:23.534967 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:24 crc kubenswrapper[4702]: I1124 18:12:24.151303 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:24 crc kubenswrapper[4702]: I1124 18:12:24.197282 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.116538 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wk2rt" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="registry-server" containerID="cri-o://4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a" gracePeriod=2 Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.170421 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.171926 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.181437 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.308699 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.308760 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5xg9\" (UniqueName: \"kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.309095 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.411228 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.411599 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.411674 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5xg9\" (UniqueName: \"kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.411789 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.412020 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.436159 4702 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-v5xg9\" (UniqueName: \"kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9\") pod \"redhat-marketplace-nrfp9\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.496644 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.576116 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.616428 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpwqg\" (UniqueName: \"kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg\") pod \"a517582a-ffc9-485c-9b4b-a75115abaadd\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.616504 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content\") pod \"a517582a-ffc9-485c-9b4b-a75115abaadd\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.616534 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities\") pod \"a517582a-ffc9-485c-9b4b-a75115abaadd\" (UID: \"a517582a-ffc9-485c-9b4b-a75115abaadd\") " Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.617995 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities" (OuterVolumeSpecName: "utilities") pod "a517582a-ffc9-485c-9b4b-a75115abaadd" (UID: "a517582a-ffc9-485c-9b4b-a75115abaadd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.621029 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg" (OuterVolumeSpecName: "kube-api-access-qpwqg") pod "a517582a-ffc9-485c-9b4b-a75115abaadd" (UID: "a517582a-ffc9-485c-9b4b-a75115abaadd"). InnerVolumeSpecName "kube-api-access-qpwqg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.720966 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.721345 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpwqg\" (UniqueName: \"kubernetes.io/projected/a517582a-ffc9-485c-9b4b-a75115abaadd-kube-api-access-qpwqg\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:26 crc kubenswrapper[4702]: I1124 18:12:26.799129 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.127782 4702 generic.go:334] "Generic (PLEG): container finished" podID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerID="4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a" exitCode=0 Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.127892 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerDied","Data":"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a"} Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.127921 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wk2rt" event={"ID":"a517582a-ffc9-485c-9b4b-a75115abaadd","Type":"ContainerDied","Data":"5a0b8ad268cd847a48af3d7c3f08c13f93799eee23c14b7e6a1dfe49784b0c70"} Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.127930 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wk2rt" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.127944 4702 scope.go:117] "RemoveContainer" containerID="4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.130519 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerStarted","Data":"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3"} Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.130545 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerStarted","Data":"c20c81b5007f5af962dc48af3a45d4a8fa033400ce42fcb1167832422af98ca6"} Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.161712 4702 scope.go:117] "RemoveContainer" containerID="2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.180368 4702 scope.go:117] "RemoveContainer" containerID="c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.195619 4702 scope.go:117] "RemoveContainer" containerID="4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a" Nov 24 18:12:27 crc kubenswrapper[4702]: E1124 18:12:27.196169 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a\": container with ID starting with 4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a not found: ID does not exist" containerID="4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.196229 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a"} err="failed to get container status \"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a\": rpc error: code = NotFound desc = could not find container \"4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a\": container with ID starting with 4e16fb4f429a0d89817f0d6ce60f9240128909ef27b3e848fa517ee8f063234a not found: ID does not exist" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.196256 4702 scope.go:117] "RemoveContainer" containerID="2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741" Nov 24 18:12:27 crc kubenswrapper[4702]: E1124 18:12:27.196573 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741\": container with ID starting with 2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741 not found: ID does not exist" containerID="2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.196599 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741"} err="failed to get container status \"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741\": rpc error: code = NotFound desc = could not find container 
\"2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741\": container with ID starting with 2f5e46d08246d92b07f5d14a58cb0a798cb4a7935f12fa3b5c2f0a3401015741 not found: ID does not exist" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.196616 4702 scope.go:117] "RemoveContainer" containerID="c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b" Nov 24 18:12:27 crc kubenswrapper[4702]: E1124 18:12:27.196886 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b\": container with ID starting with c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b not found: ID does not exist" containerID="c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.196916 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b"} err="failed to get container status \"c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b\": rpc error: code = NotFound desc = could not find container \"c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b\": container with ID starting with c5db815cd008de50d0dcba6ee520c9dda614f6828532e8b335b71bbdccc6da7b not found: ID does not exist" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.607789 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a517582a-ffc9-485c-9b4b-a75115abaadd" (UID: "a517582a-ffc9-485c-9b4b-a75115abaadd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.635139 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a517582a-ffc9-485c-9b4b-a75115abaadd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.747271 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:27 crc kubenswrapper[4702]: I1124 18:12:27.754082 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wk2rt"] Nov 24 18:12:28 crc kubenswrapper[4702]: I1124 18:12:28.141228 4702 generic.go:334] "Generic (PLEG): container finished" podID="20534971-fa4c-49f6-9197-a1bdece14735" containerID="3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3" exitCode=0 Nov 24 18:12:28 crc kubenswrapper[4702]: I1124 18:12:28.141275 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerDied","Data":"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3"} Nov 24 18:12:29 crc kubenswrapper[4702]: I1124 18:12:29.151132 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerStarted","Data":"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329"} Nov 24 18:12:29 crc kubenswrapper[4702]: I1124 18:12:29.659669 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" path="/var/lib/kubelet/pods/a517582a-ffc9-485c-9b4b-a75115abaadd/volumes" Nov 24 18:12:30 crc kubenswrapper[4702]: I1124 18:12:30.160018 4702 generic.go:334] "Generic (PLEG): container finished" podID="20534971-fa4c-49f6-9197-a1bdece14735" containerID="f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329" exitCode=0 Nov 24 18:12:30 crc kubenswrapper[4702]: I1124 18:12:30.160064 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerDied","Data":"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329"} Nov 24 18:12:32 crc kubenswrapper[4702]: I1124 18:12:32.174215 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerStarted","Data":"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa"} Nov 24 18:12:32 crc kubenswrapper[4702]: I1124 18:12:32.201562 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nrfp9" podStartSLOduration=2.656230787 podStartE2EDuration="6.201542951s" podCreationTimestamp="2025-11-24 18:12:26 +0000 UTC" firstStartedPulling="2025-11-24 18:12:28.143275556 +0000 UTC m=+1437.384016730" lastFinishedPulling="2025-11-24 18:12:31.68858773 +0000 UTC m=+1440.929328894" observedRunningTime="2025-11-24 18:12:32.193501766 +0000 UTC m=+1441.434242940" watchObservedRunningTime="2025-11-24 18:12:32.201542951 +0000 UTC m=+1441.442284125" Nov 24 18:12:36 crc kubenswrapper[4702]: I1124 18:12:36.576686 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:36 
crc kubenswrapper[4702]: I1124 18:12:36.577196 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:36 crc kubenswrapper[4702]: I1124 18:12:36.616666 4702 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:37 crc kubenswrapper[4702]: I1124 18:12:37.245391 4702 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:37 crc kubenswrapper[4702]: I1124 18:12:37.289702 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:37 crc kubenswrapper[4702]: I1124 18:12:37.575171 4702 scope.go:117] "RemoveContainer" containerID="97e3b250a2227d505a38be8797939315cd3f0cfcbb65b3ed471e0ff50dfa828f" Nov 24 18:12:39 crc kubenswrapper[4702]: I1124 18:12:39.218516 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nrfp9" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="registry-server" containerID="cri-o://6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa" gracePeriod=2 Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.182355 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.239525 4702 generic.go:334] "Generic (PLEG): container finished" podID="20534971-fa4c-49f6-9197-a1bdece14735" containerID="6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa" exitCode=0 Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.239574 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerDied","Data":"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa"} Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.239603 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nrfp9" event={"ID":"20534971-fa4c-49f6-9197-a1bdece14735","Type":"ContainerDied","Data":"c20c81b5007f5af962dc48af3a45d4a8fa033400ce42fcb1167832422af98ca6"} Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.239623 4702 scope.go:117] "RemoveContainer" containerID="6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.240134 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nrfp9" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.255323 4702 scope.go:117] "RemoveContainer" containerID="f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.269787 4702 scope.go:117] "RemoveContainer" containerID="3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.300990 4702 scope.go:117] "RemoveContainer" containerID="6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa" Nov 24 18:12:40 crc kubenswrapper[4702]: E1124 18:12:40.302183 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa\": container with ID starting with 6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa not found: ID does not exist" containerID="6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.302275 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa"} err="failed to get container status \"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa\": rpc error: code = NotFound desc = could not find container \"6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa\": container with ID starting with 6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa not found: ID does not exist" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.302299 4702 scope.go:117] "RemoveContainer" containerID="f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329" Nov 24 18:12:40 crc kubenswrapper[4702]: E1124 18:12:40.302880 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329\": container with ID starting with f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329 not found: ID does not exist" containerID="f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.302907 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329"} err="failed to get container status \"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329\": rpc error: code = NotFound desc = could not find container \"f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329\": container with ID starting with f77e9fb755e8eaac14a09486bd05a0f905957f7694f9cacc3bd3c35f1abe9329 not found: ID does not exist" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.302923 4702 scope.go:117] "RemoveContainer" containerID="3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3" Nov 24 18:12:40 crc kubenswrapper[4702]: E1124 18:12:40.303184 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3\": container with ID starting with 3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3 not found: ID does not exist" containerID="3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3" 
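The burst of "ContainerStatus from runtime service failed ... NotFound" and "DeleteContainer returned error" entries above is the kubelet re-issuing removal for registry-server containers that CRI-O has already garbage-collected: the runtime answers with a gRPC NotFound, meaning the container is already gone, so the cleanup still completes (see the "Volume detached" and "SyncLoop REMOVE" lines that follow). A minimal sketch of that error-handling pattern, assuming a stand-in containerStatus function in place of the real CRI client:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// containerStatus is a stand-in for the CRI ContainerStatus RPC; the real
// call lives behind the kubelet's runtime service client, not here.
func containerStatus(id string) error {
	// CRI-O answers exactly like the log above once the container is gone.
	return status.Errorf(codes.NotFound,
		"could not find container %q: ID does not exist", id)
}

func main() {
	id := "6bf2869eacc861ad805b459765b00bea60219ea9ac83d29a00283d6148b557fa"
	err := containerStatus(id)
	switch status.Code(err) {
	case codes.OK:
		fmt.Println("container still present")
	case codes.NotFound:
		// Already gone: the delete is idempotently complete, so the error
		// is logged but removal is not retried.
		fmt.Println("already removed:", err)
	default:
		fmt.Println("genuine runtime failure:", err)
	}
}

Treating NotFound as "already done" keeps container deletion idempotent: whichever cleanup path reaches the runtime second simply observes that the container no longer exists, which is why these error-level lines are benign.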
Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.303202 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3"} err="failed to get container status \"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3\": rpc error: code = NotFound desc = could not find container \"3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3\": container with ID starting with 3de3082679a8992bf7b65cf6c67756004fa55aa277828b70b5bea15670172ac3 not found: ID does not exist" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.319083 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5xg9\" (UniqueName: \"kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9\") pod \"20534971-fa4c-49f6-9197-a1bdece14735\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.319123 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content\") pod \"20534971-fa4c-49f6-9197-a1bdece14735\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.319167 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities\") pod \"20534971-fa4c-49f6-9197-a1bdece14735\" (UID: \"20534971-fa4c-49f6-9197-a1bdece14735\") " Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.320275 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities" (OuterVolumeSpecName: "utilities") pod "20534971-fa4c-49f6-9197-a1bdece14735" (UID: "20534971-fa4c-49f6-9197-a1bdece14735"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.324142 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9" (OuterVolumeSpecName: "kube-api-access-v5xg9") pod "20534971-fa4c-49f6-9197-a1bdece14735" (UID: "20534971-fa4c-49f6-9197-a1bdece14735"). InnerVolumeSpecName "kube-api-access-v5xg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.336646 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20534971-fa4c-49f6-9197-a1bdece14735" (UID: "20534971-fa4c-49f6-9197-a1bdece14735"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.421331 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5xg9\" (UniqueName: \"kubernetes.io/projected/20534971-fa4c-49f6-9197-a1bdece14735-kube-api-access-v5xg9\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.421367 4702 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.421376 4702 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20534971-fa4c-49f6-9197-a1bdece14735-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.570311 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:40 crc kubenswrapper[4702]: I1124 18:12:40.576658 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nrfp9"] Nov 24 18:12:41 crc kubenswrapper[4702]: I1124 18:12:41.656028 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20534971-fa4c-49f6-9197-a1bdece14735" path="/var/lib/kubelet/pods/20534971-fa4c-49f6-9197-a1bdece14735/volumes" Nov 24 18:13:34 crc kubenswrapper[4702]: I1124 18:13:34.047909 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-0eaa-account-create-update-594nr"] Nov 24 18:13:34 crc kubenswrapper[4702]: I1124 18:13:34.057486 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-db-create-lh7m6"] Nov 24 18:13:34 crc kubenswrapper[4702]: I1124 18:13:34.064648 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-0eaa-account-create-update-594nr"] Nov 24 18:13:34 crc kubenswrapper[4702]: I1124 18:13:34.072283 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-db-create-lh7m6"] Nov 24 18:13:35 crc kubenswrapper[4702]: I1124 18:13:35.657068 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73321ea7-21d6-42e7-be3f-311cbcc3dfed" path="/var/lib/kubelet/pods/73321ea7-21d6-42e7-be3f-311cbcc3dfed/volumes" Nov 24 18:13:35 crc kubenswrapper[4702]: I1124 18:13:35.657648 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0d6d4fb-8a29-4170-bba6-5416855a535a" path="/var/lib/kubelet/pods/f0d6d4fb-8a29-4170-bba6-5416855a535a/volumes" Nov 24 18:13:37 crc kubenswrapper[4702]: I1124 18:13:37.663192 4702 scope.go:117] "RemoveContainer" containerID="3bf99c29832af57966e8820bb2787f9a47c6a7b62dd131deb4605f7aaa3105a7" Nov 24 18:13:37 crc kubenswrapper[4702]: I1124 18:13:37.707264 4702 scope.go:117] "RemoveContainer" containerID="771fb315b724b66aea83d0565100ca3af55c5bb493f27eebd99db50072d5ae77" Nov 24 18:13:37 crc kubenswrapper[4702]: I1124 18:13:37.723732 4702 scope.go:117] "RemoveContainer" containerID="9c79b6ae1776f03c2e85977f2b22917ef244f7aadf9356d731bb355fbfb5c95c" Nov 24 18:13:37 crc kubenswrapper[4702]: I1124 18:13:37.750570 4702 scope.go:117] "RemoveContainer" containerID="9287b841b67760c829ae21cf4a51e1b835940f4a03cee658252334da218492ae" Nov 24 18:13:50 crc kubenswrapper[4702]: I1124 18:13:50.033254 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-4k497"] 
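The machine-config-daemon entries that resume below show the full liveness cycle: the kubelet GETs http://127.0.0.1:8798/health, a refused connection counts as a failed probe, and after enough consecutive failures the container is killed with the pod's grace period (gracePeriod=600 above) and restarted, eventually landing in the "back-off 5m0s" CrashLoopBackOff seen later. A minimal sketch of the check itself, assuming the endpoint from the log and a one-second timeout (not taken from a real manifest); probeOnce is an illustrative name, not kubelet API:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness check. Any transport error (a
// refused connection included) or a status outside 200-399 counts as a
// failure, matching the kubelet's HTTP probe semantics.
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Endpoint taken from the log; the timeout value is an assumption.
	err := probeOnce("http://127.0.0.1:8798/health", time.Second)
	fmt.Println(err) // e.g. "... connect: connection refused" while the daemon is down
}

A "connect: connection refused" error never reaches the status-code check, which is exactly the failure output these probe lines record; once restarts repeat, the kubelet's restart backoff doubles up to the five-minute cap reported in the CrashLoopBackOff errors further on.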
Nov 24 18:13:50 crc kubenswrapper[4702]: I1124 18:13:50.038164 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-4k497"] Nov 24 18:13:51 crc kubenswrapper[4702]: I1124 18:13:51.702347 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3646f1dd-f4f1-4bdd-9f48-a2ac45542a14" path="/var/lib/kubelet/pods/3646f1dd-f4f1-4bdd-9f48-a2ac45542a14/volumes" Nov 24 18:13:52 crc kubenswrapper[4702]: I1124 18:13:52.482662 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:13:52 crc kubenswrapper[4702]: I1124 18:13:52.482726 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:13:56 crc kubenswrapper[4702]: I1124 18:13:56.022544 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-r5jmd"] Nov 24 18:13:56 crc kubenswrapper[4702]: I1124 18:13:56.027055 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-r5jmd"] Nov 24 18:13:57 crc kubenswrapper[4702]: I1124 18:13:57.656762 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7c02c76-0859-42bc-b1ac-4aeddd431161" path="/var/lib/kubelet/pods/e7c02c76-0859-42bc-b1ac-4aeddd431161/volumes" Nov 24 18:14:22 crc kubenswrapper[4702]: I1124 18:14:22.483243 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:14:22 crc kubenswrapper[4702]: I1124 18:14:22.483846 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:14:37 crc kubenswrapper[4702]: I1124 18:14:37.839333 4702 scope.go:117] "RemoveContainer" containerID="8888fdd1c725ff404aef9f5ff9f5c95a473980dadd67a82993d9db626f2e94b0" Nov 24 18:14:37 crc kubenswrapper[4702]: I1124 18:14:37.869221 4702 scope.go:117] "RemoveContainer" containerID="1453fdb48fabc746672d5daeeb8035c48c2b42c07cb0aa341e0ddb80679d42f9" Nov 24 18:14:52 crc kubenswrapper[4702]: I1124 18:14:52.483056 4702 patch_prober.go:28] interesting pod/machine-config-daemon-wmjst container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:14:52 crc kubenswrapper[4702]: I1124 18:14:52.483579 4702 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:14:52 crc kubenswrapper[4702]: I1124 18:14:52.483621 4702 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" Nov 24 18:14:52 crc kubenswrapper[4702]: I1124 18:14:52.484171 4702 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"} pod="openshift-machine-config-operator/machine-config-daemon-wmjst" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:14:52 crc kubenswrapper[4702]: I1124 18:14:52.484215 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerName="machine-config-daemon" containerID="cri-o://5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" gracePeriod=600 Nov 24 18:14:53 crc kubenswrapper[4702]: E1124 18:14:53.112223 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:14:53 crc kubenswrapper[4702]: I1124 18:14:53.170538 4702 generic.go:334] "Generic (PLEG): container finished" podID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" exitCode=0 Nov 24 18:14:53 crc kubenswrapper[4702]: I1124 18:14:53.170572 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerDied","Data":"5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"} Nov 24 18:14:53 crc kubenswrapper[4702]: I1124 18:14:53.170637 4702 scope.go:117] "RemoveContainer" containerID="ba35f1bec30bc67d4d164351b74ccc0ad6f0708cc6542b6c85f662fcb2b54c89" Nov 24 18:14:53 crc kubenswrapper[4702]: I1124 18:14:53.171173 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:14:53 crc kubenswrapper[4702]: E1124 18:14:53.173276 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.135653 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28"] Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136372 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136385 4702 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136396 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="extract-content" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136402 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="extract-content" Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136414 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136420 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136437 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="extract-utilities" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136443 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="extract-utilities" Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136462 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="extract-content" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136468 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="extract-content" Nov 24 18:15:00 crc kubenswrapper[4702]: E1124 18:15:00.136506 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="extract-utilities" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136519 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="extract-utilities" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136689 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="20534971-fa4c-49f6-9197-a1bdece14735" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.136715 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="a517582a-ffc9-485c-9b4b-a75115abaadd" containerName="registry-server" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.137243 4702 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.139066 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.141569 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.147452 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28"] Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.291765 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st992\" (UniqueName: \"kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.291953 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.292015 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.393971 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.394021 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.394103 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st992\" (UniqueName: \"kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.395413 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume\") pod 
\"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.400228 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.410876 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st992\" (UniqueName: \"kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992\") pod \"collect-profiles-29400135-n8f28\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.455703 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" Nov 24 18:15:00 crc kubenswrapper[4702]: I1124 18:15:00.644074 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28"] Nov 24 18:15:01 crc kubenswrapper[4702]: I1124 18:15:01.229491 4702 generic.go:334] "Generic (PLEG): container finished" podID="12faa764-894c-4427-92a6-0d4e89004f17" containerID="9807623dd3a7905be01aef806271238ed18de03c0ebdc5b18c8357999c79faf2" exitCode=0 Nov 24 18:15:01 crc kubenswrapper[4702]: I1124 18:15:01.229725 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" event={"ID":"12faa764-894c-4427-92a6-0d4e89004f17","Type":"ContainerDied","Data":"9807623dd3a7905be01aef806271238ed18de03c0ebdc5b18c8357999c79faf2"} Nov 24 18:15:01 crc kubenswrapper[4702]: I1124 18:15:01.229843 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" event={"ID":"12faa764-894c-4427-92a6-0d4e89004f17","Type":"ContainerStarted","Data":"6d91936c42e94a28d601e1deaa625414332c6df95fef74ffa15e78291ad08b4e"} Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.488235 4702 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28"
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.625992 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st992\" (UniqueName: \"kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992\") pod \"12faa764-894c-4427-92a6-0d4e89004f17\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") "
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.626061 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume\") pod \"12faa764-894c-4427-92a6-0d4e89004f17\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") "
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.626152 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume\") pod \"12faa764-894c-4427-92a6-0d4e89004f17\" (UID: \"12faa764-894c-4427-92a6-0d4e89004f17\") "
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.626981 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume" (OuterVolumeSpecName: "config-volume") pod "12faa764-894c-4427-92a6-0d4e89004f17" (UID: "12faa764-894c-4427-92a6-0d4e89004f17"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.631640 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992" (OuterVolumeSpecName: "kube-api-access-st992") pod "12faa764-894c-4427-92a6-0d4e89004f17" (UID: "12faa764-894c-4427-92a6-0d4e89004f17"). InnerVolumeSpecName "kube-api-access-st992". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.631958 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "12faa764-894c-4427-92a6-0d4e89004f17" (UID: "12faa764-894c-4427-92a6-0d4e89004f17"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.728070 4702 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12faa764-894c-4427-92a6-0d4e89004f17-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.728102 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st992\" (UniqueName: \"kubernetes.io/projected/12faa764-894c-4427-92a6-0d4e89004f17-kube-api-access-st992\") on node \"crc\" DevicePath \"\""
Nov 24 18:15:02 crc kubenswrapper[4702]: I1124 18:15:02.728112 4702 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12faa764-894c-4427-92a6-0d4e89004f17-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 18:15:03 crc kubenswrapper[4702]: I1124 18:15:03.243032 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28" event={"ID":"12faa764-894c-4427-92a6-0d4e89004f17","Type":"ContainerDied","Data":"6d91936c42e94a28d601e1deaa625414332c6df95fef74ffa15e78291ad08b4e"}
Nov 24 18:15:03 crc kubenswrapper[4702]: I1124 18:15:03.243067 4702 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d91936c42e94a28d601e1deaa625414332c6df95fef74ffa15e78291ad08b4e"
Nov 24 18:15:03 crc kubenswrapper[4702]: I1124 18:15:03.243091 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-n8f28"
Nov 24 18:15:03 crc kubenswrapper[4702]: E1124 18:15:03.358778 4702 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12faa764_894c_4427_92a6_0d4e89004f17.slice/crio-6d91936c42e94a28d601e1deaa625414332c6df95fef74ffa15e78291ad08b4e\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12faa764_894c_4427_92a6_0d4e89004f17.slice\": RecentStats: unable to find data in memory cache]"
Nov 24 18:15:05 crc kubenswrapper[4702]: I1124 18:15:05.648602 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:15:05 crc kubenswrapper[4702]: E1124 18:15:05.649158 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.401036 4702 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fb6wv/must-gather-sl5cn"]
Nov 24 18:15:08 crc kubenswrapper[4702]: E1124 18:15:08.401854 4702 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12faa764-894c-4427-92a6-0d4e89004f17" containerName="collect-profiles"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.401875 4702 state_mem.go:107] "Deleted CPUSet assignment" podUID="12faa764-894c-4427-92a6-0d4e89004f17" containerName="collect-profiles"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.402159 4702 memory_manager.go:354] "RemoveStaleState removing state" podUID="12faa764-894c-4427-92a6-0d4e89004f17" containerName="collect-profiles"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.403389 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.405534 4702 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-fb6wv"/"default-dockercfg-tnbmm"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.405785 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fb6wv"/"kube-root-ca.crt"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.410305 4702 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fb6wv"/"openshift-service-ca.crt"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.434491 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fb6wv/must-gather-sl5cn"]
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.518642 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.518925 4702 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qmdg\" (UniqueName: \"kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.620360 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.620507 4702 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qmdg\" (UniqueName: \"kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.620847 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.637844 4702 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qmdg\" (UniqueName: \"kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg\") pod \"must-gather-sl5cn\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") " pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:08 crc kubenswrapper[4702]: I1124 18:15:08.727007 4702 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:15:09 crc kubenswrapper[4702]: I1124 18:15:09.120637 4702 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fb6wv/must-gather-sl5cn"]
Nov 24 18:15:09 crc kubenswrapper[4702]: I1124 18:15:09.281748 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" event={"ID":"aced5cea-c142-4d07-950f-587505f60d79","Type":"ContainerStarted","Data":"ac236b30692385224c4829ea0b6321378b53a5db9e39cb67e7a3f28b1e150012"}
Nov 24 18:15:13 crc kubenswrapper[4702]: I1124 18:15:13.323753 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" event={"ID":"aced5cea-c142-4d07-950f-587505f60d79","Type":"ContainerStarted","Data":"eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d"}
Nov 24 18:15:13 crc kubenswrapper[4702]: I1124 18:15:13.324412 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" event={"ID":"aced5cea-c142-4d07-950f-587505f60d79","Type":"ContainerStarted","Data":"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"}
Nov 24 18:15:13 crc kubenswrapper[4702]: I1124 18:15:13.344404 4702 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" podStartSLOduration=1.9486200660000002 podStartE2EDuration="5.34438779s" podCreationTimestamp="2025-11-24 18:15:08 +0000 UTC" firstStartedPulling="2025-11-24 18:15:09.129115974 +0000 UTC m=+1598.369857138" lastFinishedPulling="2025-11-24 18:15:12.524883698 +0000 UTC m=+1601.765624862" observedRunningTime="2025-11-24 18:15:13.340941788 +0000 UTC m=+1602.581682972" watchObservedRunningTime="2025-11-24 18:15:13.34438779 +0000 UTC m=+1602.585128944"
Nov 24 18:15:18 crc kubenswrapper[4702]: I1124 18:15:18.648382 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:15:18 crc kubenswrapper[4702]: E1124 18:15:18.649098 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:15:33 crc kubenswrapper[4702]: I1124 18:15:33.648476 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:15:33 crc kubenswrapper[4702]: E1124 18:15:33.649280 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.127215 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/util/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.312867 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/pull/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.325998 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/util/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.329689 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/pull/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.464977 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/pull/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.496088 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/util/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.496265 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_440bfe09fdefc314a717f31a57d7ebfa5bfc58c8ed9bd7a4e0a57fadb45nzwd_85f0cdb4-6612-4d20-ab62-f7a82947bf1f/extract/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.654055 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/util/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.807326 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/pull/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.812569 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/util/0.log"
Nov 24 18:15:44 crc kubenswrapper[4702]: I1124 18:15:44.882052 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.032348 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.050969 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.058492 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5718f74582c40b759b598b113d8eef2abfd5dff5a164d10987c01bbdb94vq6m_4181b945-25f4-44ce-8bf9-f9fd2b0b61fd/extract/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.190616 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.330263 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.342484 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.359444 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.499900 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.534287 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.538047 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dvdtk5_f97e8ad2-ce08-473a-b864-41b444e9fe49/extract/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.653084 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.813581 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/util/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.848898 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/pull/0.log"
Nov 24 18:15:45 crc kubenswrapper[4702]: I1124 18:15:45.855690 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.009609 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.011001 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.042509 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62924b48d7c39bd6b89ba2946b1b573a02aaa01e9968a59c70da1ba1d6mpnkd_9905111d-899a-490f-813a-027435cc85bf/extract/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.165424 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.309275 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.330654 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.366083 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.521695 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.544008 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.570224 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_62eccd409f16609fda8c131ec699dcbc23034b86aeec0473e3f9799434tw6wm_a4880862-6c76-47c4-9046-cf95cf711d7e/extract/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.648484 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:15:46 crc kubenswrapper[4702]: E1124 18:15:46.648715 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.694975 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.858832 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/util/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.859573 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/pull/0.log"
Nov 24 18:15:46 crc kubenswrapper[4702]: I1124 18:15:46.887321 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/pull/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.052838 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/extract/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.074594 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/util/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.091959 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_7abe4676e9c7174a0976b528ff13527e30f787694a732dea185c78a27cmrj8p_6b34ab2f-e0b1-429e-8b6d-7b8809671888/pull/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.125193 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/util/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.239877 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/util/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.263408 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/pull/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.267868 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/pull/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.460127 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/util/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.471068 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/pull/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.479029 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590w7rf8_c71d5f19-b4ff-4070-b5a7-71520ffa8b6e/extract/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.553555 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-d47996487-cmbrh_ba679587-111b-43ee-bde5-a810fb5f605e/manager/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.677362 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-index-lgh2x_71579ed0-43ea-4a86-a7dd-f7ab0351a1c0/registry-server/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.711245 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-79b8cddcd-6x5pw_b12d50df-848e-49f1-800c-316f9339557b/manager/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.770454 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-79b8cddcd-6x5pw_b12d50df-848e-49f1-800c-316f9339557b/kube-rbac-proxy/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.872152 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-index-sxlqv_81f6ab0d-36cf-46bd-8d94-cb790654a1b0/registry-server/0.log"
Nov 24 18:15:47 crc kubenswrapper[4702]: I1124 18:15:47.922980 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6b49b55b86-l7qhw_f235ced9-aab3-4b84-b788-8155ca736b51/kube-rbac-proxy/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.061732 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6b49b55b86-l7qhw_f235ced9-aab3-4b84-b788-8155ca736b51/manager/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.139443 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-index-pcdn7_c5cf41b8-c818-4c3d-9b5c-e19a1eab4c18/registry-server/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.220248 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7c8667cbc8-rdxfl_0857a270-3c6c-4e5d-b7f9-4589b12beec7/manager/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.336552 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-index-xdjp4_bd669ceb-92b4-4d8a-b2f0-94ee4bdc5df1/registry-server/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.388090 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-74dcfc55d5-fj98w_967cb8ea-a50c-409e-8b5e-f91ae596762c/kube-rbac-proxy/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.412868 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-74dcfc55d5-fj98w_967cb8ea-a50c-409e-8b5e-f91ae596762c/manager/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.548063 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-index-bnffm_dfce7a84-c113-4ba0-a101-899a5a2b2140/registry-server/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.580317 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-779fc9694b-8fmcf_2970c5e6-ad7e-406f-b6a4-f49a4bbe38f6/operator/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.648287 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-index-n79bv_81d84f21-a3ee-4817-a631-03b27359f592/registry-server/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.721330 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bfd5974c7-qr9hz_9637afb3-11e9-4870-a0c6-564bb3983c36/kube-rbac-proxy/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.796602 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bfd5974c7-qr9hz_9637afb3-11e9-4870-a0c6-564bb3983c36/manager/0.log"
Nov 24 18:15:48 crc kubenswrapper[4702]: I1124 18:15:48.842978 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-index-w2sb2_00cf497b-b7bb-4450-b5fd-fad217daba30/registry-server/0.log"
Nov 24 18:15:57 crc kubenswrapper[4702]: I1124 18:15:57.648469 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:15:57 crc kubenswrapper[4702]: E1124 18:15:57.649213 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:16:01 crc kubenswrapper[4702]: I1124 18:16:01.421893 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-k4vgs_aa55d4fd-a2ed-4b4f-acb5-03b1704de025/control-plane-machine-set-operator/0.log"
Nov 24 18:16:01 crc kubenswrapper[4702]: I1124 18:16:01.609068 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wq4v5_82d7d022-5521-4f60-b316-4101099d58ed/machine-api-operator/0.log"
Nov 24 18:16:01 crc kubenswrapper[4702]: I1124 18:16:01.636287 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wq4v5_82d7d022-5521-4f60-b316-4101099d58ed/kube-rbac-proxy/0.log"
Nov 24 18:16:08 crc kubenswrapper[4702]: I1124 18:16:08.648632 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:16:08 crc kubenswrapper[4702]: E1124 18:16:08.649539 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.094211 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6m56b_a5e78bfd-acd0-4668-9bc1-7e2f91859d00/controller/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.104462 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6m56b_a5e78bfd-acd0-4668-9bc1-7e2f91859d00/kube-rbac-proxy/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.272015 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-frr-files/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.443115 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-reloader/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.454277 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-frr-files/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.463919 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-metrics/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.506908 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-reloader/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.647841 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-frr-files/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.681544 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-reloader/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.684753 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-metrics/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.688963 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-metrics/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.828120 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-frr-files/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.832884 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-reloader/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.839714 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/cp-metrics/0.log"
Nov 24 18:16:16 crc kubenswrapper[4702]: I1124 18:16:16.854064 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/controller/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.022522 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/frr-metrics/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.036503 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/kube-rbac-proxy/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.036509 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/kube-rbac-proxy-frr/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.209751 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/reloader/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.249136 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-snxq2_457d1440-b56e-496a-82a1-89d661eadc8e/frr-k8s-webhook-server/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.372247 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lr7vm_9cba0ae5-b6cc-4ade-b903-c7b28bbaf372/frr/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.455825 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5f77795d-q8krl_80458bb6-ef17-4651-a20d-0e9d9b7659a3/manager/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.543728 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-9fc6b78-vsd8l_c726fa9b-4677-4c40-8ebd-242f517a6375/webhook-server/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.614977 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2xjjq_e50e827d-eb1f-4401-80ca-c1b59cb02e75/kube-rbac-proxy/0.log"
Nov 24 18:16:17 crc kubenswrapper[4702]: I1124 18:16:17.732481 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2xjjq_e50e827d-eb1f-4401-80ca-c1b59cb02e75/speaker/0.log"
Nov 24 18:16:20 crc kubenswrapper[4702]: I1124 18:16:20.647556 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:16:20 crc kubenswrapper[4702]: E1124 18:16:20.648020 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.385660 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-1d6a-account-create-update-l4dz6_693f2168-99fc-4548-bf88-af6518389c3e/mariadb-account-create-update/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.536983 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-db-create-x2lkz_4dce0c6e-8084-4b53-b7d9-3615741d0f11/mariadb-database-create/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.540565 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-db-sync-bglfv_3488ae92-0040-4207-b23e-5c38723cebb3/glance-db-sync/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.691990 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-external-api-0_31cf16bb-c1c3-4dbb-9a07-c6317c2e070a/glance-api/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.732728 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-external-api-0_31cf16bb-c1c3-4dbb-9a07-c6317c2e070a/glance-log/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.732997 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-external-api-0_31cf16bb-c1c3-4dbb-9a07-c6317c2e070a/glance-httpd/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.875543 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-internal-api-0_7ce6c7a2-7aeb-4ad5-9339-08635b05cefe/glance-api/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.892914 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-internal-api-0_7ce6c7a2-7aeb-4ad5-9339-08635b05cefe/glance-httpd/0.log"
Nov 24 18:16:30 crc kubenswrapper[4702]: I1124 18:16:30.929747 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_glance-default-internal-api-0_7ce6c7a2-7aeb-4ad5-9339-08635b05cefe/glance-log/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.295596 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_d49d1cbe-321e-4459-951f-f7efcc8ed02e/mysql-bootstrap/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.405034 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_keystone-5d44bcfcdf-szfx6_8cb198f9-f3d0-4d3b-b99e-95427f1bff17/keystone-api/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.543386 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_d49d1cbe-321e-4459-951f-f7efcc8ed02e/mysql-bootstrap/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.561233 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_d49d1cbe-321e-4459-951f-f7efcc8ed02e/galera/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.657696 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:16:31 crc kubenswrapper[4702]: E1124 18:16:31.657904 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.759255 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_43554378-0651-47bf-a8b8-658ad5843651/mysql-bootstrap/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.902395 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_43554378-0651-47bf-a8b8-658ad5843651/mysql-bootstrap/0.log"
Nov 24 18:16:31 crc kubenswrapper[4702]: I1124 18:16:31.937301 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_43554378-0651-47bf-a8b8-658ad5843651/galera/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.207265 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_memcached-0_eabbfbdd-7dbc-4814-9c34-7b7a4f6d7154/memcached/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.231190 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_ea71d590-80f8-45c8-9db2-d163f5516941/mysql-bootstrap/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.360760 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_ea71d590-80f8-45c8-9db2-d163f5516941/mysql-bootstrap/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.371281 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_ea71d590-80f8-45c8-9db2-d163f5516941/galera/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.422963 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstackclient_82389de7-41e2-4820-97e4-bccaf40d3fd6/openstackclient/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.552790 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_ecd2bb28-4395-494d-944a-7f25b22e1561/setup-container/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.680606 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_ecd2bb28-4395-494d-944a-7f25b22e1561/setup-container/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.716131 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_ecd2bb28-4395-494d-944a-7f25b22e1561/rabbitmq/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.772431 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-proxy-547856594f-rvdc7_d65bf194-f05e-423a-a5b4-7acdce24e0c9/proxy-httpd/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.908027 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-proxy-547856594f-rvdc7_d65bf194-f05e-423a-a5b4-7acdce24e0c9/proxy-server/0.log"
Nov 24 18:16:32 crc kubenswrapper[4702]: I1124 18:16:32.920675 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-ring-rebalance-pkkdm_14a2829d-0ef6-4544-aae9-f4bd9ddb061d/swift-ring-rebalance/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.079309 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/account-auditor/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.099417 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/account-reaper/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.117656 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/account-replicator/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.177127 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/account-server/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.265342 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/container-auditor/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.275371 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/container-replicator/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.300095 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/container-server/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.361113 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/container-updater/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.452007 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/object-expirer/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.465711 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/object-auditor/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.478559 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/object-replicator/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.564460 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/object-server/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.604335 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/object-updater/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.660842 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/rsync/0.log"
Nov 24 18:16:33 crc kubenswrapper[4702]: I1124 18:16:33.662589 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_1dd79ba8-8f74-4d74-a1a7-86b0dcb3a0cb/swift-recon-cron/0.log"
Nov 24 18:16:43 crc kubenswrapper[4702]: I1124 18:16:43.648347 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:16:43 crc kubenswrapper[4702]: E1124 18:16:43.649244 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.250668 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-utilities/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.403666 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-content/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.428580 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-content/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.442503 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-utilities/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.565777 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-content/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.580325 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/extract-utilities/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.771565 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-utilities/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.881346 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xt5hs_806486ea-2ebb-4171-915c-69170eaf3967/registry-server/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.920358 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-utilities/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.980385 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-content/0.log"
Nov 24 18:16:45 crc kubenswrapper[4702]: I1124 18:16:45.980385 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-content/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.129067 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-utilities/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.172182 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/extract-content/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.389897 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/util/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.552680 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z67lw_4a27a95c-78d8-428d-8437-2469bee7ddad/registry-server/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.557577 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/util/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.609307 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/pull/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.620634 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/pull/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.808716 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/util/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.812155 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/extract/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.828532 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c658ck7_4b8235d7-a978-4f3a-ae4e-2bd3a856deac/pull/0.log"
Nov 24 18:16:46 crc kubenswrapper[4702]: I1124 18:16:46.960222 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9fvdq_687de2b1-fcda-4c00-a295-1b5ee7ef64c2/marketplace-operator/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.024709 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.134543 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.155886 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-content/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.197200 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-content/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.328123 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.347429 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/extract-content/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.477173 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qnt56_8603ba78-8783-40be-b184-26376b1a6e9e/registry-server/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.655676 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.771014 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.801584 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-content/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.801659 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-content/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.953022 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-utilities/0.log"
Nov 24 18:16:47 crc kubenswrapper[4702]: I1124 18:16:47.967624 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/extract-content/0.log"
Nov 24 18:16:48 crc kubenswrapper[4702]: I1124 18:16:48.335252 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sdfgt_546f6b1b-0fd6-48ee-bce9-08ecb6cfa3a7/registry-server/0.log"
Nov 24 18:16:55 crc kubenswrapper[4702]: I1124 18:16:55.648605 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:16:55 crc kubenswrapper[4702]: E1124 18:16:55.649108 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:17:08 crc kubenswrapper[4702]: I1124 18:17:08.648543 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:17:08 crc kubenswrapper[4702]: E1124 18:17:08.649403 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:17:19 crc kubenswrapper[4702]: I1124 18:17:19.652428 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:17:19 crc kubenswrapper[4702]: E1124 18:17:19.653164 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:17:22 crc kubenswrapper[4702]: I1124 18:17:22.040374 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-x2lkz"]
Nov 24 18:17:22 crc kubenswrapper[4702]: I1124 18:17:22.046207 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6"]
Nov 24 18:17:22 crc kubenswrapper[4702]: I1124 18:17:22.051987 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-x2lkz"]
Nov 24 18:17:22 crc kubenswrapper[4702]: I1124 18:17:22.058221 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-1d6a-account-create-update-l4dz6"]
Nov 24 18:17:23 crc kubenswrapper[4702]: I1124 18:17:23.661133 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dce0c6e-8084-4b53-b7d9-3615741d0f11" path="/var/lib/kubelet/pods/4dce0c6e-8084-4b53-b7d9-3615741d0f11/volumes"
Nov 24 18:17:23 crc kubenswrapper[4702]: I1124 18:17:23.663089 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="693f2168-99fc-4548-bf88-af6518389c3e" path="/var/lib/kubelet/pods/693f2168-99fc-4548-bf88-af6518389c3e/volumes"
Nov 24 18:17:29 crc kubenswrapper[4702]: I1124 18:17:29.048226 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bglfv"]
Nov 24 18:17:29 crc kubenswrapper[4702]: I1124 18:17:29.054592 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bglfv"]
Nov 24 18:17:29 crc kubenswrapper[4702]: I1124 18:17:29.656914 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3488ae92-0040-4207-b23e-5c38723cebb3" path="/var/lib/kubelet/pods/3488ae92-0040-4207-b23e-5c38723cebb3/volumes"
Nov 24 18:17:34 crc kubenswrapper[4702]: I1124 18:17:34.647697 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:17:34 crc kubenswrapper[4702]: E1124 18:17:34.648496 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:17:37 crc kubenswrapper[4702]: I1124 18:17:37.947039 4702 scope.go:117] "RemoveContainer" containerID="fa698811d1744afb78f66fd76ced80f9ccad41a56d3dda4899a5d09c48aedb72"
Nov 24 18:17:37 crc kubenswrapper[4702]: I1124 18:17:37.965461 4702 scope.go:117] "RemoveContainer" containerID="b55126b97c32842db46158d3483b300ad8f569cae749b93188625ce6c95485b6"
Nov 24 18:17:38 crc kubenswrapper[4702]: I1124 18:17:38.019312 4702 scope.go:117] "RemoveContainer" containerID="07001201be966a74f935baa50edbd9110b1f176513aecaeaddfc8f767bca56ce"
Nov 24 18:17:47 crc kubenswrapper[4702]: I1124 18:17:47.648976 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:17:47 crc kubenswrapper[4702]: E1124 18:17:47.649769 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499"
Nov 24 18:17:51 crc kubenswrapper[4702]: I1124 18:17:51.530895 4702 generic.go:334] "Generic (PLEG): container finished" podID="aced5cea-c142-4d07-950f-587505f60d79" containerID="d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01" exitCode=0
Nov 24 18:17:51 crc kubenswrapper[4702]: I1124 18:17:51.530970 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" event={"ID":"aced5cea-c142-4d07-950f-587505f60d79","Type":"ContainerDied","Data":"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"}
Nov 24 18:17:51 crc kubenswrapper[4702]: I1124 18:17:51.531724 4702 scope.go:117] "RemoveContainer" containerID="d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"
Nov 24 18:17:52 crc kubenswrapper[4702]: I1124 18:17:52.552834 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fb6wv_must-gather-sl5cn_aced5cea-c142-4d07-950f-587505f60d79/gather/0.log"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.086327 4702 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fb6wv/must-gather-sl5cn"]
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.087174 4702 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-fb6wv/must-gather-sl5cn" podUID="aced5cea-c142-4d07-950f-587505f60d79" containerName="copy" containerID="cri-o://eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d" gracePeriod=2
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.094625 4702 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fb6wv/must-gather-sl5cn"]
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.460373 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fb6wv_must-gather-sl5cn_aced5cea-c142-4d07-950f-587505f60d79/copy/0.log"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.461176 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.588421 4702 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fb6wv_must-gather-sl5cn_aced5cea-c142-4d07-950f-587505f60d79/copy/0.log"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.589228 4702 generic.go:334] "Generic (PLEG): container finished" podID="aced5cea-c142-4d07-950f-587505f60d79" containerID="eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d" exitCode=143
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.589279 4702 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fb6wv/must-gather-sl5cn"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.589282 4702 scope.go:117] "RemoveContainer" containerID="eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.603873 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output\") pod \"aced5cea-c142-4d07-950f-587505f60d79\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") "
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.603973 4702 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qmdg\" (UniqueName: \"kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg\") pod \"aced5cea-c142-4d07-950f-587505f60d79\" (UID: \"aced5cea-c142-4d07-950f-587505f60d79\") "
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.608342 4702 scope.go:117] "RemoveContainer" containerID="d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.612822 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg" (OuterVolumeSpecName: "kube-api-access-2qmdg") pod "aced5cea-c142-4d07-950f-587505f60d79" (UID: "aced5cea-c142-4d07-950f-587505f60d79"). InnerVolumeSpecName "kube-api-access-2qmdg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.689474 4702 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "aced5cea-c142-4d07-950f-587505f60d79" (UID: "aced5cea-c142-4d07-950f-587505f60d79"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.693120 4702 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aced5cea-c142-4d07-950f-587505f60d79" path="/var/lib/kubelet/pods/aced5cea-c142-4d07-950f-587505f60d79/volumes"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.705454 4702 scope.go:117] "RemoveContainer" containerID="eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d"
Nov 24 18:17:59 crc kubenswrapper[4702]: E1124 18:17:59.705900 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d\": container with ID starting with eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d not found: ID does not exist" containerID="eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.705945 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d"} err="failed to get container status \"eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d\": rpc error: code = NotFound desc = could not find container \"eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d\": container with ID starting with eb0640ea92579ee5a6baf760fc158e5b2e1fbfd6385e5a8ae1d5bcd51f3cbb5d not found: ID does not exist"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.705975 4702 scope.go:117] "RemoveContainer" containerID="d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.705995 4702 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qmdg\" (UniqueName: \"kubernetes.io/projected/aced5cea-c142-4d07-950f-587505f60d79-kube-api-access-2qmdg\") on node \"crc\" DevicePath \"\""
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.706026 4702 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/aced5cea-c142-4d07-950f-587505f60d79-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 24 18:17:59 crc kubenswrapper[4702]: E1124 18:17:59.706445 4702 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01\": container with ID starting with d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01 not found: ID does not exist" containerID="d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"
Nov 24 18:17:59 crc kubenswrapper[4702]: I1124 18:17:59.706480 4702 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01"} err="failed to get container status \"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01\": rpc error: code = NotFound desc = could not find container \"d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01\": container with ID starting with d4f2f78efa2e9b76dd4f36f0d55def6dd7434eeadf46df5cdeaadf176ff01e01 not found: ID does not exist"
Nov 24 18:18:02 crc kubenswrapper[4702]: I1124 18:18:02.648085 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038"
Nov 24 18:18:02 crc
kubenswrapper[4702]: E1124 18:18:02.648884 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:18:13 crc kubenswrapper[4702]: I1124 18:18:13.648606 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:18:13 crc kubenswrapper[4702]: E1124 18:18:13.649334 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:18:28 crc kubenswrapper[4702]: I1124 18:18:28.648540 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:18:28 crc kubenswrapper[4702]: E1124 18:18:28.649631 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:18:39 crc kubenswrapper[4702]: I1124 18:18:39.648602 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:18:39 crc kubenswrapper[4702]: E1124 18:18:39.649339 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:18:51 crc kubenswrapper[4702]: I1124 18:18:51.655759 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:18:51 crc kubenswrapper[4702]: E1124 18:18:51.656455 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:19:03 crc kubenswrapper[4702]: I1124 18:19:03.648305 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:19:03 crc kubenswrapper[4702]: E1124 18:19:03.649041 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:19:18 crc kubenswrapper[4702]: I1124 18:19:18.648190 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:19:18 crc kubenswrapper[4702]: E1124 18:19:18.648868 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:19:32 crc kubenswrapper[4702]: I1124 18:19:32.648287 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:19:32 crc kubenswrapper[4702]: E1124 18:19:32.649078 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:19:47 crc kubenswrapper[4702]: I1124 18:19:47.648519 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:19:47 crc kubenswrapper[4702]: E1124 18:19:47.649574 4702 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wmjst_openshift-machine-config-operator(9a77fa32-4f49-4b02-ac4a-fbad4d33e499)\"" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" podUID="9a77fa32-4f49-4b02-ac4a-fbad4d33e499" Nov 24 18:20:01 crc kubenswrapper[4702]: I1124 18:20:01.653649 4702 scope.go:117] "RemoveContainer" containerID="5a2283b81681db64815275ada03ae16ef743531f9ed460469ace66eaf4225038" Nov 24 18:20:02 crc kubenswrapper[4702]: I1124 18:20:02.420226 4702 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wmjst" event={"ID":"9a77fa32-4f49-4b02-ac4a-fbad4d33e499","Type":"ContainerStarted","Data":"01744b71937fe555cd7550af0dd5fe4af3c29baaba918ccb27c82c1874f26b27"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111120772024442 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111120773017360 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111114643016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111114644015453 5ustar corecore